Merge remote-tracking branch 'origin/master' into bump_v1.2.0

Conflicts:
	VERSION
	daemon/container.go
	daemon/daemon.go

Signed-off-by: Victor Vieux <vieux@docker.com>
diff --git a/.mailmap b/.mailmap
index 6837586..47860de 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1,4 +1,9 @@
-# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf
+# Generate AUTHORS: hack/generate-authors.sh
+
+# Tip for finding duplicates (besides scanning the output of AUTHORS for name
+# duplicates that aren't also email duplicates): scan the output of:
+#   git log --format='%aE - %aN' | sort -uf
+
 <charles.hooper@dotcloud.com> <chooper@plumata.com>
 <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
 <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
@@ -47,8 +52,9 @@
 Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
 Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
 <proppy@google.com> <proppy@aminche.com>
-<michael@crosbymichael.com> <crosby.michael@gmail.com>
-<github@metaliveblog.com> <github@developersupport.net>
+<michael@docker.com> <michael@crosbymichael.com>
+<michael@docker.com> <crosby.michael@gmail.com>
+<github@developersupport.net> <github@metaliveblog.com>
 <brandon@ifup.org> <brandon@ifup.co>
 <dano@spotify.com> <daniel.norberg@gmail.com>
 <danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
@@ -78,3 +84,16 @@
 Liang-Chi Hsieh <viirya@gmail.com>
 Aleksa Sarai <cyphar@cyphar.com>
 Will Weaver <monkey@buildingbananas.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
+Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
+Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
+<github@hollensbe.org> <erik+github@hollensbe.org>
+<github@albersweb.de> <albers@users.noreply.github.com>
+<lsm5@fedoraproject.org> <lsm5@redhat.com>
+<marc@marc-abramowitz.com> <msabramo@gmail.com>
+Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
+<bernat@luffy.cx> <vincent@bernat.im>
+<p@pwaller.net> <peter@scraperwiki.com>
+<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
+Francisco Carriedo <fcarriedo@gmail.com>
+<julienbordellier@gmail.com> <git@julienbordellier.com>
diff --git a/.travis.yml b/.travis.yml
index ae03d6c..55fa904 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,10 +3,20 @@
 
 language: go
 
-go: 1.2
+go:
+# This should match the version in the Dockerfile.
+  - 1.3.1
+# Test against older versions too, just for a little extra retrocompat.
+  - 1.2
+
+# Let us have pretty experimental Docker-based Travis workers.
+# (These spin up much faster than the VM-based ones.)
+sudo: false
 
 # Disable the normal go build.
-install: true
+install:
+  - export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper' # btrfs and devicemapper fail to compile thanks to a couple missing headers (which we can't install thanks to "sudo: false")
+  - export AUTO_GOPATH=1
 
 before_script:
   - env | sort
@@ -14,5 +24,7 @@
 script:
   - hack/make.sh validate-dco
   - hack/make.sh validate-gofmt
+  - ./hack/make.sh dynbinary
+  - DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary
 
 # vim:set sw=2 ts=2:
diff --git a/AUTHORS b/AUTHORS
index 10f01fb..43904e9 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,5 +1,5 @@
 # This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `.mailmap`.
+# For how it is generated, see `hack/generate-authors.sh`.
 
 Aanand Prasad <aanand.prasad@gmail.com>
 Aaron Feng <aaron.feng@gmail.com>
@@ -9,33 +9,43 @@
 Adam Singer <financeCoding@gmail.com>
 Aditya <aditya@netroy.in>
 Adrian Mouat <adrian.mouat@gmail.com>
+Adrien Folie <folie.adrien@gmail.com>
+AJ Bowen <aj@gandi.net>
+Al Tobey <al@ooyala.com>
 alambike <alambike@gmail.com>
+Albert Zhang <zhgwenming@gmail.com>
 Aleksa Sarai <cyphar@cyphar.com>
+Alex Gaynor <alex.gaynor@gmail.com>
+Alex Warhawk <ax.warhawk@gmail.com>
 Alexander Larsson <alexl@redhat.com>
+Alexander Shopov <ash@kambanaria.org>
 Alexandr Morozov <lk4d4math@gmail.com>
 Alexey Kotlyarov <alexey@infoxchange.net.au>
 Alexey Shamrin <shamrin@gmail.com>
-Alex Gaynor <alex.gaynor@gmail.com>
 Alexis THOMAS <fr.alexisthomas@gmail.com>
 almoehi <almoehi@users.noreply.github.com>
-Al Tobey <al@ooyala.com>
 amangoel <amangoel@gmail.com>
+AnandkumarPatel <anandkumarpatel@gmail.com>
+Andre Dublin <81dublin@gmail.com>
 Andrea Luzzardi <aluzzardi@gmail.com>
+Andrea Turli <andrea.turli@gmail.com>
 Andreas Savvides <andreas@editd.com>
 Andreas Tiefenthaler <at@an-ti.eu>
-Andrea Turli <andrea.turli@gmail.com>
 Andrew Duckworth <grillopress@gmail.com>
+Andrew France <andrew@avito.co.uk>
 Andrew Macgregor <andrew.macgregor@agworld.com.au>
 Andrew Munsell <andrew@wizardapps.net>
-Andrews Medina <andrewsmedina@gmail.com>
+Andrew Weiss <andrew.weiss@outlook.com>
 Andrew Williams <williams.andrew@gmail.com>
+Andrews Medina <andrewsmedina@gmail.com>
 Andy Chambers <anchambers@paypal.com>
 andy diller <dillera@gmail.com>
 Andy Goldstein <agoldste@redhat.com>
 Andy Kipp <andy@rstudio.com>
-Andy Rothfusz <github@metaliveblog.com>
+Andy Rothfusz <github@developersupport.net>
 Andy Smith <github@anarkystic.com>
 Anthony Bishopric <git@anthonybishopric.com>
+Anton Löfgren <anton.lofgren@gmail.com>
 Anton Nikitin <anton.k.nikitin@gmail.com>
 Antony Messerli <amesserl@rackspace.com>
 apocas <petermdias@gmail.com>
@@ -44,25 +54,34 @@
 Barnaby Gray <barnaby@pickle.me.uk>
 Barry Allard <barry.allard@gmail.com>
 Bartłomiej Piotrowski <b@bpiotrowski.pl>
-Benjamin Atkin <ben@benatkin.com>
-Benoit Chesneau <bchesneau@gmail.com>
+bdevloed <boris.de.vloed@gmail.com>
+Ben Firshman <ben@firshman.co.uk>
 Ben Sargent <ben@brokendigits.com>
 Ben Toews <mastahyeti@gmail.com>
 Ben Wiklund <ben@daisyowl.com>
+Benjamin Atkin <ben@benatkin.com>
+Benoit Chesneau <bchesneau@gmail.com>
 Bernerd Schaefer <bj.schaefer@gmail.com>
 Bhiraj Butala <abhiraj.butala@gmail.com>
 bin liu <liubin0329@users.noreply.github.com>
 Bouke Haarsma <bouke@webatoom.nl>
+Boyd Hemphill <boyd@feedmagnet.com>
 Brandon Liu <bdon@bdon.org>
 Brandon Philips <brandon@ifup.org>
+Brandon Rhodes <brandon@rhodesmill.org>
+Brett Kochendorfer <brett.kochendorfer@gmail.com>
+Brian (bex) Exelbierd <bexelbie@redhat.com>
 Brian Dorsey <brian@dorseys.org>
 Brian Flad <bflad417@gmail.com>
 Brian Goff <cpuguy83@gmail.com>
 Brian McCallister <brianm@skife.org>
 Brian Olsen <brian@maven-group.org>
 Brian Shumate <brian@couchbase.com>
+Brice Jaglin <bjaglin@teads.tv>
 Briehan Lombaard <briehan.lombaard@gmail.com>
 Bruno Bigras <bigras.bruno@gmail.com>
+Bruno Renié <brutasse@gmail.com>
+Bryan Bess <squarejaw@bsbess.com>
 Bryan Matsuo <bryan.matsuo@gmail.com>
 Bryan Murphy <bmurphy1976@gmail.com>
 Caleb Spare <cespare@gmail.com>
@@ -73,19 +92,35 @@
 Charles Lindsay <chaz@chazomatic.us>
 Charles Merriam <charles.merriam@gmail.com>
 Charlie Lewis <charliel@lab41.org>
+Chewey <prosto-chewey@users.noreply.github.com>
 Chia-liang Kao <clkao@clkao.org>
+Chris Alfonso <calfonso@redhat.com>
+Chris Snow <chsnow123@gmail.com>
 Chris St. Pierre <chris.a.st.pierre@gmail.com>
+chrismckinnel <chris.mckinnel@tangentlabs.co.uk>
+Christian Berendt <berendt@b1-systems.de>
+ChristoperBiscardi <biscarch@sketcht.com>
+Christophe Troestler <christophe.Troestler@umons.ac.be>
 Christopher Currie <codemonkey+github@gmail.com>
 Christopher Rigor <crigor@gmail.com>
-Christophe Troestler <christophe.Troestler@umons.ac.be>
+Ciro S. Costa <ciro.costa@usp.br>
 Clayton Coleman <ccoleman@redhat.com>
 Colin Dunklau <colin.dunklau@gmail.com>
 Colin Rice <colin@daedrum.net>
+Colin Walters <walters@verbum.org>
 Cory Forsyth <cory.forsyth@gmail.com>
+cpuguy83 <cpuguy83@gmail.com>
 cressie176 <github@stephen-cresswell.net>
+Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
+Daan van Berkel <daan.v.berkel.1980@gmail.com>
 Dafydd Crosby <dtcrsby@gmail.com>
 Dan Buch <d.buch@modcloth.com>
 Dan Hirsch <thequux@upstandinghackers.com>
+Dan Keder <dan.keder@gmail.com>
+Dan McPherson <dmcphers@redhat.com>
+Dan Stine <sw@stinemail.com>
+Dan Walsh <dwalsh@redhat.com>
+Dan Williams <me@deedubs.com>
 Daniel Exner <dex@dragonslave.de>
 Daniel Garcia <daniel@danielgarcia.info>
 Daniel Gasienica <daniel@gasienica.ch>
@@ -95,22 +130,21 @@
 Daniel Robinson <gottagetmac@gmail.com>
 Daniel Von Fange <daniel@leancoder.com>
 Daniel YC Lin <dlin.tw@gmail.com>
-Dan Keder <dan.keder@gmail.com>
-Dan McPherson <dmcphers@redhat.com>
+Daniel, Dao Quang Minh <dqminh89@gmail.com>
 Danny Berger <dpb587@gmail.com>
 Danny Yates <danny@codeaholics.org>
-Dan Stine <sw@stinemail.com>
-Dan Walsh <dwalsh@redhat.com>
-Dan Williams <me@deedubs.com>
 Darren Coxall <darren@darrencoxall.com>
 Darren Shepherd <darren.s.shepherd@gmail.com>
 David Anderson <dave@natulte.net>
 David Calavera <david.calavera@gmail.com>
+David Corking <dmc-source@dcorking.com>
 David Gageot <david@gageot.net>
 David Mcanulty <github@hellspark.com>
 David Röthlisberger <david@rothlis.net>
 David Sissitka <me@dsissitka.com>
 Deni Bertovic <deni@kset.org>
+Derek <crq@kernel.org>
+Deric Crago <deric.crago@gmail.com>
 Dinesh Subhraveti <dineshs@altiscale.com>
 Djibril Koné <kone.djibril@gmail.com>
 dkumor <daniel@dkumor.com>
@@ -118,8 +152,10 @@
 Dolph Mathews <dolph.mathews@gmail.com>
 Dominik Honnef <dominik@honnef.co>
 Don Spaulding <donspauldingii@gmail.com>
-Dražen Lučanin <kermit666@gmail.com>
+Doug Davis <dug@us.ibm.com>
+doug tangren <d.tangren@gmail.com>
 Dr Nic Williams <drnicwilliams@gmail.com>
+Dražen Lučanin <kermit666@gmail.com>
 Dustin Sallings <dustin@spy.net>
 Edmund Wagner <edmund-wagner@web.de>
 Eiichi Tsukata <devel@etsukata.com>
@@ -130,13 +166,17 @@
 Eric Hanchrow <ehanchrow@ine.com>
 Eric Lee <thenorthsecedes@gmail.com>
 Eric Myhre <hash@exultant.us>
-Erik Hollensbe <erik+github@hollensbe.org>
+Eric Windisch <eric@windisch.us>
+Eric Windisch <ewindisch@docker.com>
+Erik Hollensbe <github@hollensbe.org>
+Erik Inge Bolsø <knan@redpill-linpro.com>
 Erno Hopearuoho <erno.hopearuoho@gmail.com>
 eugenkrizo <eugen.krizo@gmail.com>
 Evan Hazlett <ejhazlett@gmail.com>
 Evan Krall <krall@yelp.com>
 Evan Phoenix <evan@fallingsnow.net>
 Evan Wies <evan@neomantra.net>
+evanderkoogh <info@erronis.nl>
 Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
 ezbercih <cem.ezberci@gmail.com>
 Fabio Falci <fabiofalci@gmail.com>
@@ -147,12 +187,16 @@
 Felix Rabe <felix@rabe.io>
 Fernando <fermayo@gmail.com>
 Flavio Castelli <fcastelli@suse.com>
+FLGMwt <ryan.stelly@live.com>
+Francisco Carriedo <fcarriedo@gmail.com>
 Francisco Souza <f@souza.cc>
 Frank Macreery <frank@macreery.com>
+Fred Lifton <fred.lifton@docker.com>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 Frederik Loeffert <frederik@zitrusmedia.de>
 Freek Kalter <freek@kalteronline.org>
 Gabe Rosenhouse <gabe@missionst.com>
+Gabor Nagy <mail@aigeruth.hu>
 Gabriel Monroy <gabriel@opdemand.com>
 Galen Sampson <galen.sampson@gmail.com>
 Gareth Rushgrove <gareth@morethanseven.net>
@@ -160,75 +204,106 @@
 Gereon Frey <gereon.frey@dynport.de>
 German DZ <germ@ndz.com.ar>
 Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
+Giuseppe Mazzotta <gdm85@users.noreply.github.com>
+Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
+Glyn Normington <gnormington@gopivotal.com>
 Goffert van Gool <goffert@phusion.nl>
 Graydon Hoare <graydon@pobox.com>
 Greg Thornton <xdissent@me.com>
 grunny <mwgrunny@gmail.com>
+Guilherme Salgado <gsalgado@gmail.com>
 Guillaume J. Charmes <guillaume.charmes@docker.com>
 Gurjeet Singh <gurjeet@singh.im>
 Guruprasad <lgp171188@gmail.com>
+Harald Albers <github@albersweb.de>
 Harley Laue <losinggeneration@gmail.com>
 Hector Castro <hectcastro@gmail.com>
+Henning Sprang <henning.sprang@gmail.com>
 Hobofan <goisser94@gmail.com>
+Hollie Teal <hollie.teal@docker.com>
+Hollie Teal <hollietealok@users.noreply.github.com>
+hollietealok <hollie@docker.com>
 Hunter Blanks <hunter@twilio.com>
+hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
+Ian Babrou <ibobrik@gmail.com>
+Ian Bull <irbull@gmail.com>
+Ian Main <imain@redhat.com>
 Ian Truslove <ian.truslove@gmail.com>
 ILYA Khlopotov <ilya.khlopotov@gmail.com>
 inglesp <peter.inglesby@gmail.com>
 Isaac Dupree <antispam@idupree.com>
 Isabel Jimenez <contact.isabeljimenez@gmail.com>
 Isao Jonas <isao.jonas@gmail.com>
+Ivan Fraixedes <ifcdev@gmail.com>
 Jack Danger Canty <jackdanger@squareup.com>
-jakedt <jake@devtable.com>
 Jake Moshenko <jake@devtable.com>
+jakedt <jake@devtable.com>
 James Allen <jamesallen0108@gmail.com>
 James Carr <james.r.carr@gmail.com>
 James DeFelice <james.defelice@ishisystems.com>
 James Harrison Fisher <jameshfisher@gmail.com>
+James Kyle <james@jameskyle.org>
 James Mills <prologic@shortcircuit.net.au>
 James Turnbull <james@lovedthanlost.net>
+Jan Pazdziora <jpazdziora@redhat.com>
+Jan Toebes <jan@toebes.info>
+Jaroslaw Zabiello <hipertracker@gmail.com>
 jaseg <jaseg@jaseg.net>
+Jason Giedymin <jasong@apache.org>
+Jason Hall <imjasonh@gmail.com>
+Jason Livesay <ithkuil@gmail.com>
 Jason McVetta <jason.mcvetta@gmail.com>
 Jason Plum <jplum@devonit.com>
 Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
 Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
 Jeff Lindsay <progrium@gmail.com>
+Jeff Welch <whatthejeff@gmail.com>
+Jeffrey Bolle <jeffreybolle@gmail.com>
 Jeremy Grosser <jeremy@synack.me>
-Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Jesse Dubay <jesse@thefortytwo.net>
+Jezeniel Zapanta <jpzapanta22@gmail.com>
 Jilles Oldenbeuving <ojilles@gmail.com>
 Jim Alateras <jima@comware.com.au>
+Jim Perrin <jperrin@centos.org>
 Jimmy Cuadra <jimmy@jimmycuadra.com>
+Jiří Župka <jzupka@redhat.com>
 Joe Beda <joe.github@bedafamily.com>
-Joel Handwell <joelhandwell@gmail.com>
 Joe Shaw <joe@joeshaw.org>
 Joe Van Dyk <joe@tanga.com>
+Joel Handwell <joelhandwell@gmail.com>
 Joffrey F <joffrey@docker.com>
 Johan Euphrosine <proppy@google.com>
-Johannes 'fish' Ziemke <github@freigeist.org>
 Johan Rydberg <johan.rydberg@gmail.com>
+Johannes 'fish' Ziemke <github@freigeist.org>
 John Costa <john.costa@gmail.com>
 John Feminella <jxf@jxf.me>
 John Gardiner Myers <jgmyers@proofpoint.com>
+John OBrien III <jobrieniii@yahoo.com>
 John Warwick <jwarwick@gmail.com>
+Jon Wedaman <jweede@gmail.com>
 Jonas Pfenniger <jonas@pfenniger.name>
+Jonathan Boulle <jonathanboulle@gmail.com>
+Jonathan Camp <jonathan@irondojo.com>
 Jonathan McCrohan <jmccrohan@gmail.com>
 Jonathan Mueller <j.mueller@apoveda.ch>
 Jonathan Pares <jonathanpa@users.noreply.github.com>
 Jonathan Rudenberg <jonathan@titanous.com>
-Jon Wedaman <jweede@gmail.com>
 Joost Cassee <joost@cassee.net>
 Jordan Arentsen <blissdev@gmail.com>
 Jordan Sissel <jls@semicomplete.com>
 Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
 Joseph Hager <ajhager@gmail.com>
+Josh <jokajak@gmail.com>
 Josh Hawn <josh.hawn@docker.com>
 Josh Poimboeuf <jpoimboe@redhat.com>
 JP <jpellerin@leapfrogonline.com>
 Julien Barbier <write0@gmail.com>
+Julien Bordellier <julienbordellier@gmail.com>
 Julien Dubois <julien.dubois@gmail.com>
 Justin Force <justin.force@gmail.com>
 Justin Plock <jplock@users.noreply.github.com>
 Justin Simonelis <justin.p.simonelis@gmail.com>
+Jérôme Petazzoni <jerome.petazzoni@dotcloud.com>
 Karan Lyons <karan@karanlyons.com>
 Karl Grzeszczak <karlgrz@gmail.com>
 Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
@@ -236,36 +311,49 @@
 Keli Hu <dev@keli.hu>
 Ken Cochrane <kencochrane@gmail.com>
 Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
+Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
 Kevin Clark <kevin.clark@gmail.com>
 Kevin J. Lynagh <kevin@keminglabs.com>
 Kevin Menard <kevin@nirvdrum.com>
 Kevin Wallace <kevin@pentabarf.net>
 Keyvan Fatehi <keyvanfatehi@gmail.com>
-kim0 <email.ahmedkamal@googlemail.com>
+kies <lleelm@gmail.com>
 Kim BKC Carlbacker <kim.carlbacker@gmail.com>
+kim0 <email.ahmedkamal@googlemail.com>
 Kimbro Staken <kstaken@kstaken.com>
 Kiran Gangadharan <kiran.daredevil@gmail.com>
+knappe <tyler.knappe@gmail.com>
+Kohei Tsuruta <coheyxyz@gmail.com>
 Konstantin Pelykh <kpelykh@zettaset.com>
 Kyle Conroy <kyle.j.conroy@gmail.com>
+kyu <leehk1227@gmail.com>
+Lachlan Coote <lcoote@vmware.com>
 lalyos <lalyos@yahoo.com>
 Lance Chen <cyen0312@gmail.com>
 Lars R. Damerow <lars@pixar.com>
 Laurie Voss <github@seldo.com>
+leeplay <hyeongkyu.lee@navercorp.com>
+Len Weincier <len@cloudafrica.net>
+Levi Gross <levi@levigross.com>
 Lewis Peckover <lew+github@lew.io>
 Liang-Chi Hsieh <viirya@gmail.com>
-Lokesh Mandvekar <lsm5@redhat.com>
+Lokesh Mandvekar <lsm5@fedoraproject.org>
 Louis Opter <kalessin@kalessin.fr>
 lukaspustina <lukas.pustina@centerdevice.com>
 lukemarsden <luke@digital-crocus.com>
 Mahesh Tiyyagura <tmahesh@gmail.com>
+Manfred Zabarauskas <manfredas@zabarauskas.com>
 Manuel Meurer <manuel@krautcomputing.com>
 Manuel Woelker <github@manuel.woelker.org>
 Marc Abramowitz <marc@marc-abramowitz.com>
 Marc Kuo <kuomarc2@gmail.com>
+Marc Tamsky <mtamsky@gmail.com>
 Marco Hennings <marco.hennings@freiheit.com>
 Marcus Farkas <toothlessgear@finitebox.com>
 Marcus Ramberg <marcus@nordaaker.com>
+marcuslinke <marcus.linke@gmx.de>
 Marek Goldmann <marek.goldmann@gmail.com>
+Marius Voila <marius.voila@gmail.com>
 Mark Allen <mrallen1@yahoo.com>
 Mark McGranaghan <mmcgrana@gmail.com>
 Marko Mikulicic <mmikulicic@gmail.com>
@@ -278,30 +366,40 @@
 Matt Apperson <me@mattapperson.com>
 Matt Bachmann <bachmann.matt@gmail.com>
 Matt Haggard <haggardii@gmail.com>
+Matthew Heon <mheon@redhat.com>
 Matthew Mueller <mattmuelle@gmail.com>
 Matthias Klumpp <matthias@tenstral.net>
 Matthias Kühnle <git.nivoc@neverbox.com>
 mattymo <raytrac3r@gmail.com>
-Maxime Petazzoni <max@signalfuse.com>
-Maxim Treskin <zerthurd@gmail.com>
+mattyw <mattyw@me.com>
 Max Shytikov <mshytikov@gmail.com>
+Maxim Treskin <zerthurd@gmail.com>
+Maxime Petazzoni <max@signalfuse.com>
 meejah <meejah@meejah.ca>
 Michael Brown <michael@netdirect.ca>
-Michael Crosby <michael@crosbymichael.com>
+Michael Crosby <michael@docker.com>
 Michael Gorsuch <gorsuch@github.com>
 Michael Neale <michael.neale@gmail.com>
+Michael Prokop <github@michael-prokop.at>
 Michael Stapelberg <michael+gh@stapelberg.de>
+Michaël Pailloncy <mpapo.dev@gmail.com>
+Michiel@unhosted <michiel@unhosted.org>
 Miguel Angel Fernández <elmendalerenda@gmail.com>
+Mike Chelen <michael.chelen@gmail.com>
 Mike Gaffney <mike@uberu.com>
 Mike MacCana <mike.maccana@gmail.com>
 Mike Naberezny <mike@naberezny.com>
+Mike Snitzer <snitzer@redhat.com>
 Mikhail Sobolev <mss@mawhrin.net>
 Mohit Soni <mosoni@ebay.com>
 Morgante Pell <morgante.pell@morgante.net>
 Morten Siebuhr <sbhr@sbhr.dk>
+Mrunal Patel <mrunalp@gmail.com>
 Nan Monnand Deng <monnand@gmail.com>
+Naoki Orii <norii@cs.cmu.edu>
 Nate Jones <nate@endot.org>
 Nathan Kleyn <nathan@nathankleyn.com>
+Nathan LeClaire <nathan.leclaire@docker.com>
 Nelson Chen <crazysim@gmail.com>
 Niall O'Higgins <niallo@unworkable.org>
 Nick Payne <nick@kurai.co.uk>
@@ -309,15 +407,20 @@
 Nick Stinemates <nick@stinemates.org>
 Nicolas Dudebout <nicolas.dudebout@gatech.edu>
 Nicolas Kaiser <nikai@nikai.net>
+NikolaMandic <mn080202@gmail.com>
 noducks <onemannoducks@gmail.com>
 Nolan Darilek <nolan@thewordnerd.info>
+O.S. Tezer <ostezer@gmail.com>
+OddBloke <daniel@daniel-watkins.co.uk>
 odk- <github@odkurzacz.org>
 Oguz Bilgic <fisyonet@gmail.com>
 Ole Reifschneider <mail@ole-reifschneider.de>
-O.S. Tezer <ostezer@gmail.com>
+Olivier Gambier <dmp42@users.noreply.github.com>
 pandrew <letters@paulnotcom.se>
 Pascal Borreli <pascal@borreli.com>
+Patrick Hemmer <patrick.hemmer@gmail.com>
 pattichen <craftsbear@gmail.com>
+Paul <paul9869@gmail.com>
 Paul Annesley <paul@annesley.cc>
 Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Hammond <paul@paulhammond.org>
@@ -325,49 +428,71 @@
 Paul Lietar <paul@lietar.net>
 Paul Morie <pmorie@gmail.com>
 Paul Nasrat <pnasrat@gmail.com>
-Paul <paul9869@gmail.com>
+Paul Weaver <pauweave@cisco.com>
+Peter Bourgon <peter@bourgon.org>
 Peter Braden <peterbraden@peterbraden.co.uk>
-Peter Waller <peter@scraperwiki.com>
-Phillip Alexander <git@phillipalexander.io>
+Peter Waller <p@pwaller.net>
+Phil <underscorephil@gmail.com>
 Phil Spitler <pspitler@gmail.com>
+Phillip Alexander <git@phillipalexander.io>
 Piergiuliano Bossi <pgbossi@gmail.com>
 Pierre-Alain RIVIERE <pariviere@ippon.fr>
 Piotr Bogdan <ppbogdan@gmail.com>
 pysqz <randomq@126.com>
 Quentin Brossard <qbrossard@gmail.com>
+r0n22 <cameron.regan@gmail.com>
 Rafal Jeczalik <rjeczalik@gmail.com>
 Rajat Pandit <rp@rajatpandit.com>
+Rajdeep Dua <dua_rajdeep@yahoo.com>
 Ralph Bean <rbean@redhat.com>
 Ramkumar Ramachandra <artagnon@gmail.com>
 Ramon van Alteren <ramon@vanalteren.nl>
 Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
 rgstephens <greg@udon.org>
 Rhys Hiltner <rhys@twitch.tv>
+Richard Harvey <richard@squarecows.com>
 Richo Healey <richo@psych0tik.net>
 Rick Bradley <rick@users.noreply.github.com>
+Rick van de Loo <rickvandeloo@gmail.com>
+Robert Bachmann <rb@robertbachmann.at>
 Robert Obryk <robryk@gmail.com>
 Roberto G. Hashioka <roberto.hashioka@docker.com>
+Robin Speekenbrink <robin@kingsquare.nl>
 robpc <rpcann@gmail.com>
 Rodrigo Vaz <rodrigo.vaz@gmail.com>
 Roel Van Nyen <roel.vannyen@gmail.com>
 Roger Peppe <rogpeppe@gmail.com>
 Rohit Jnagal <jnagal@google.com>
+Roland Huß <roland@jolokia.org>
 Roland Moriz <rmoriz@users.noreply.github.com>
+Ron Smits <ron.smits@gmail.com>
 Rovanion Luckey <rovanion.luckey@gmail.com>
+Rudolph Gottesheim <r.gottesheim@loot.at>
+Ryan Anderson <anderson.ryanc@gmail.com>
 Ryan Aslett <github@mixologic.com>
 Ryan Fowler <rwfowler@gmail.com>
 Ryan O'Donnell <odonnellryanc@gmail.com>
 Ryan Seto <ryanseto@yak.net>
 Ryan Thomas <rthomas@atlassian.com>
+s-ko <aleks@s-ko.net>
 Sam Alba <sam.alba@gmail.com>
+Sam Bailey <cyprix@cyprix.com.au>
 Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
+Sam Reis <sreis@atlassian.com>
 Sam Rijs <srijs@airpost.net>
 Samuel Andaya <samuel@andaya.net>
+satoru <satorulogic@gmail.com>
+Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
 Scott Bessler <scottbessler@gmail.com>
 Scott Collier <emailscottcollier@gmail.com>
 Sean Cronin <seancron@gmail.com>
 Sean P. Kane <skane@newrelic.com>
-Sébastien Stormacq <sebsto@users.noreply.github.com>
+Sebastiaan van Stijn <github@gone.nl>
+Sebastiaan van Stijn <thaJeztah@users.noreply.github.com>
+Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
+SeongJae Park <sj38.park@gmail.com>
+Shane Canon <scanon@lbl.gov>
+shaunol <shaunol@gmail.com>
 Shawn Landden <shawn@churchofgit.com>
 Shawn Siefkas <shawn.siefkas@meredith.com>
 Shih-Yuan Lee <fourdollars@gmail.com>
@@ -378,14 +503,19 @@
 Solomon Hykes <solomon@docker.com>
 Song Gao <song@gao.io>
 Soulou <leo@unbekandt.eu>
+soulshake <amy@gandi.net>
 Sridatta Thatipamala <sthatipamala@gmail.com>
 Sridhar Ratnakumar <sridharr@activestate.com>
 Steeve Morin <steeve.morin@gmail.com>
 Stefan Praszalowicz <stefan@greplin.com>
+Stephen Crosby <stevecrozz@gmail.com>
 Steven Burgess <steven.a.burgess@hotmail.com>
 sudosurootdev <sudosurootdev@gmail.com>
-Sven Dowideit <SvenDowideit@home.org.au>
+Sven Dowideit <svendowideit@home.org.au>
 Sylvain Bellemare <sylvain.bellemare@ezeep.com>
+Sébastien <sebastien@yoozio.com>
+Sébastien Luttringer <seblu@seblu.net>
+Sébastien Stormacq <sebsto@users.noreply.github.com>
 tang0th <tang0th@gmx.com>
 Tatsuki Sugiura <sugi@nemui.org>
 Tehmasp Chaudhri <tehmasp@gmail.com>
@@ -400,19 +530,24 @@
 Tianon Gravi <admwiggin@gmail.com>
 Tibor Vass <teabee89@gmail.com>
 Tim Bosse <taim@bosboot.org>
-Timothy Hobbs <timothyhobbs@seznam.cz>
 Tim Ruffles <oi@truffles.me.uk>
+Tim Ruffles <timruffles@googlemail.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
+Timothy Hobbs <timothyhobbs@seznam.cz>
 tjmehta <tj@init.me>
 Tobias Bieniek <Tobias.Bieniek@gmx.de>
+Tobias Gesellchen <tobias@gesellix.de>
 Tobias Schmidt <ts@soundcloud.com>
 Tobias Schwab <tobias.schwab@dynport.de>
 Todd Lunter <tlunter@gmail.com>
 Tom Fotherby <tom+github@peopleperhour.com>
 Tom Hulihan <hulihan.tom159@gmail.com>
+Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
 Tommaso Visconti <tommaso.visconti@gmail.com>
 Tony Daws <tony@daws.ca>
+tpng <benny.tpng@gmail.com>
 Travis Cline <travis.cline@gmail.com>
+Trent Ogren <tedwardo2@gmail.com>
 Tyler Brock <tyler.brock@gmail.com>
 Tzu-Jung Lee <roylee17@gmail.com>
 Ulysse Carion <ulyssecarion@gmail.com>
@@ -434,21 +569,29 @@
 Vladimir Bulyga <xx@ccxx.cc>
 Vladimir Kirillov <proger@wilab.org.ua>
 Vladimir Rutsky <altsysrq@gmail.com>
+waitingkuo <waitingkuo0527@gmail.com>
 Walter Leibbrandt <github@wrl.co.za>
 Walter Stanish <walter@pratyeka.org>
 WarheadsSE <max@warheads.net>
 Wes Morgan <cap10morgan@gmail.com>
 Will Dietz <w@wdtz.org>
-William Delanoue <william.delanoue@gmail.com>
-William Henry <whenry@redhat.com>
 Will Rouesnel <w.rouesnel@gmail.com>
 Will Weaver <monkey@buildingbananas.com>
+William Delanoue <william.delanoue@gmail.com>
+William Henry <whenry@redhat.com>
+William Riancho <wr.wllm@gmail.com>
+William Thurston <thurstw@amazon.com>
+wyc <wayne@neverfear.org>
 Xiuming Chen <cc@cxm.cc>
 Yang Bai <hamo.by@gmail.com>
 Yasunori Mahata <nori@mahata.net>
 Yurii Rashkovskii <yrashk@gmail.com>
+Zac Dover <zdover@redhat.com>
 Zain Memon <zain@inzain.net>
 Zaiste! <oh@zaiste.net>
+Zane DeGraffenried <zane.deg@gmail.com>
 Zilin Du <zilin.du@gmail.com>
 zimbatm <zimbatm@zimbatm.com>
+Zoltan Tombol <zoltan.tombol@gmail.com>
 zqh <zqhxuyuan@gmail.com>
+Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8ec9ce3..9b89ea4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -313,7 +313,7 @@
 - Add newlines to the JSON stream functions.
 
 #### Runtime
-* Do not ping the registry from the CLI. All requests to registres flow through the daemon.
+* Do not ping the registry from the CLI. All requests to registries flow through the daemon.
 - Check for nil information return in the lxc driver. This fixes panics with older lxc versions.
 - Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently.
 - Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device.
@@ -905,7 +905,7 @@
 
 + Add domainname support
 + Implement image filtering with path.Match
-* Remove unnecesasry warnings
+* Remove unnecessary warnings
 * Remove os/user dependency
 * Only mount the hostname file when the config exists
 * Handle signals within the `docker login` command
@@ -928,7 +928,7 @@
 + Hack: Vendor all dependencies
 * Remote API: Bump to v1.5
 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
-* Documentation: General improvments
+* Documentation: General improvements
 
 ## 0.6.1 (2013-08-23)
 
@@ -1198,7 +1198,7 @@
 * Prevent rm of running containers
 * Use go1.1 cookiejar
 - Fix issue detaching from running TTY container
-- Forbid parralel push/pull for a single image/repo. Fixes #311
+- Forbid parallel push/pull for a single image/repo. Fixes #311
 - Fix race condition within Run command when attaching.
 
 #### Client
@@ -1314,7 +1314,7 @@
 + Add caching to docker builder
 + Add support for docker builder with native API as top level command
 + Implement ENV within docker builder
-- Check the command existance prior create and add Unit tests for the case
+- Check the command existence prior create and add Unit tests for the case
 * use any whitespaces instead of tabs
 
 #### Runtime
@@ -1353,13 +1353,13 @@
 
 #### Runtime
 
-- Fix the command existance check
+- Fix the command existence check
 - strings.Split may return an empty string on no match
 - Fix an index out of range crash if cgroup memory is not
 
 #### Documentation
 
-* Various improvments
+* Various improvements
 + New example: sharing data between 2 couchdb databases
 
 #### Other
@@ -1389,7 +1389,7 @@
 ## 0.2.0 (2013-04-23)
 
 - Runtime: ghost containers can be killed and waited for
-* Documentation: update install intructions
+* Documentation: update install instructions
 - Packaging: fix Vagrantfile
 - Development: automate releasing binaries and ubuntu packages
 + Add a changelog
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d07b972..3ed8bf9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,14 +4,52 @@
 started. They are probably not perfect, please let us know if anything
 feels wrong or incomplete.
 
+## Topics
+
+* [Security Reports](#security-reports)
+* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
+* [Reporting Issues](#reporting-issues)
+* [Build Environment](#build-environment)
+* [Contribution Guidelines](#contribution-guidelines)
+* [Community Guidelines](#docker-community-guidelines)
+
+## Security Reports
+
+Please **DO NOT** file an issue for security-related problems. Please send your
+reports to [security@docker.com](mailto:security@docker.com) instead.
+
+## Design and Cleanup Proposals
+
+When considering a design proposal, we are looking for:
+
+* A description of the problem this design proposal solves
+* An issue -- not a pull request -- that describes what you will take action on
+  * Please prefix your issue with `Proposal:` in the title
+* Please review [the existing Proposals](https://github.com/dotcloud/docker/issues?direction=asc&labels=Proposal&page=1&sort=created&state=open)
+  before reporting a new issue. You can always pair with someone if you both
+  have the same idea.
+
+When considering a cleanup task, we are looking for:
+
+* A description of the refactors made
+  * Please note any logic changes if necessary
+* A pull request with the code
+  * Please prefix your PR's title with `Cleanup:` so we can quickly address it.
+  * Your pull request must remain up to date with master, so rebase as necessary.
+
 ## Reporting Issues
 
-When reporting [issues](https://github.com/dotcloud/docker/issues) 
-on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc),
-the output of `uname -a` and the output of `docker version` along with
-the output of `docker -D info`. Please include the steps required to reproduce
-the problem if possible and applicable.
-This information will help us review and fix your issue faster.
+When reporting [issues](https://github.com/docker/docker/issues) on
+GitHub, please include your host OS (Ubuntu 12.04, Fedora 19, etc.).
+Please include:
+
+* The output of `uname -a`.
+* The output of `docker version`.
+* The output of `docker -D info`.
+
+Please also include the steps required to reproduce the problem if
+possible and applicable.  This information will help us review and fix
+your issue faster.
 
 ## Build Environment
 
@@ -34,7 +72,7 @@
 We're trying very hard to keep Docker lean and focused. We don't want it
 to do everything for everybody. This means that we might decide against
 incorporating a new feature. However, there might be a way to implement
-that feature *on top of* docker.
+that feature *on top of* Docker.
 
 ### Discuss your design on the mailing list
 
@@ -48,7 +86,7 @@
 ### Create issues...
 
 Any significant improvement should be documented as [a GitHub
-issue](https://github.com/dotcloud/docker/issues) before anybody
+issue](https://github.com/docker/docker/issues) before anybody
 starts working on it.
 
 ### ...but check for existing issues first!
@@ -60,12 +98,12 @@
 
 ### Conventions
 
-Fork the repo and make changes on your fork in a feature branch:
+Fork the repository and make changes on your fork in a feature branch:
 
-- If it's a bugfix branch, name it XXX-something where XXX is the number of the
-  issue
+- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the
+  issue.
 - If it's a feature branch, create an enhancement issue to announce your
-  intentions, and name it XXX-something where XXX is the number of the issue.
+  intentions, and name it XXXX-something where XXXX is the number of the issue.
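+
+For example (illustrative issue number), a branch for issue #1234 might be
+created with:
+
+    git checkout -b 1234-fix-cli-panic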
 
 Submit unit tests for your changes.  Go has a great test framework built in; use
 it! Take a look at existing tests for inspiration. Run the full test suite on
@@ -73,18 +111,16 @@
 
 Update the documentation when creating or modifying features. Test
 your documentation changes for clarity, concision, and correctness, as
-well as a clean documentation build. See ``docs/README.md`` for more
-information on building the docs and how docs get released.
+well as a clean documentation build. See `docs/README.md` for more
+information on building the docs and how they get released.
 
 Write clean code. Universally formatted code promotes ease of writing, reading,
 and maintenance. Always run `gofmt -s -w file.go` on each changed file before
-committing your changes. Most editors have plugins that do this automatically.
+committing your changes. Most editors have plug-ins that do this automatically.
 
 Pull requests descriptions should be as clear as possible and include a
 reference to all the issues that they address.
 
-Pull requests must not contain commits from other users or branches.
-
 Commit messages must start with a capitalized and short summary (max. 50
 chars) written in the imperative, followed by an optional, more detailed
 explanatory text which is separated from the summary by an empty line.
@@ -95,26 +131,33 @@
 request automatically, but the reviewers will not be notified unless you
 comment.
 
+Pull requests must be cleanly rebased on top of master without multiple branches
+mixed into the PR.
+
+**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
+feature branch to update your pull request rather than `merge master`.
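+
+A minimal sketch of that flow (assuming your remote is named `origin` and
+your feature branch is currently checked out):
+
+    git fetch origin
+    git rebase origin/master
+    git push -f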
+
 Before the pull request is merged, make sure that you squash your commits into
 logical units of work using `git rebase -i` and `git push -f`. After every
 commit the test suite should be passing. Include documentation changes in the
 same commit so that a revert would remove all traces of the feature or fix.
 
-Commits that fix or close an issue should include a reference like `Closes #XXX`
-or `Fixes #XXX`, which will automatically close the issue when merged.
+Commits that fix or close an issue should include a reference like
+`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the
+issue when merged.
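+
+For example, a complete commit message might look like this (illustrative
+summary and issue number):
+
+    Fix error handling in the build context upload
+
+    Closes #1234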
 
-Please do not add yourself to the AUTHORS file, as it is regenerated
+Please do not add yourself to the `AUTHORS` file, as it is regenerated
 regularly from the Git history.
 
 ### Merge approval
 
-Docker maintainers use LGTM (looks good to me) in comments on the code review
+Docker maintainers use LGTM (Looks Good To Me) in comments on the code review
 to indicate acceptance.
 
 A change requires LGTMs from an absolute majority of the maintainers of each
-component affected. For example, if a change affects docs/ and registry/, it
-needs an absolute majority from the maintainers of docs/ AND, separately, an
-absolute majority of the maintainers of registry.
+component affected. For example, if a change affects `docs/` and `registry/`, it
+needs an absolute majority from the maintainers of `docs/` AND, separately, an
+absolute majority of the maintainers of `registry/`.
 
 For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
 
@@ -137,7 +180,6 @@
 Everyone is permitted to copy and distribute verbatim copies of this
 license document, but changing it is not allowed.
 
-
 Developer's Certificate of Origin 1.1
 
 By making a contribution to this project, I certify that:
@@ -165,20 +207,18 @@
     this project or the open source license(s) involved.
 ```
 
-then you just add a line to every git commit message:
+Then you just add a line to every git commit message:
 
-    Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
+    Signed-off-by: Joe Smith <joe.smith@email.com>
 
-using your real name (sorry, no pseudonyms or anonymous contributions.)
+Using your real name (sorry, no pseudonyms or anonymous contributions).
 
-One way to automate this, is customise your get ``commit.template`` by adding
-a ``prepare-commit-msg`` hook to your docker checkout:
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
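+
+For example (a minimal sketch, reusing the sample identity from the sign-off
+line above):
+
+    git config --global user.name "Joe Smith"
+    git config --global user.email joe.smith@email.com
+    git commit -s -m "Update the contribution guidelines"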
 
-```
-curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
-```
-
-* Note: the above script expects to find your GitHub user name in ``git config --get github.user``
+Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
+accepted, so there is no need to update outstanding pull requests to the new
+format right away, but please do adjust your processes for future contributions.
 
 #### Small patch exception
 
@@ -194,11 +234,83 @@
 
 ### How can I become a maintainer?
 
-* Step 1: learn the component inside out
-* Step 2: make yourself useful by contributing code, bugfixes, support etc.
-* Step 3: volunteer on the irc channel (#docker@freenode)
-* Step 4: propose yourself at a scheduled docker meeting in #docker-dev
+* Step 1: Learn the component inside out
+* Step 2: Make yourself useful by contributing code, bug fixes, support etc.
+* Step 3: Volunteer on the IRC channel (#docker at Freenode)
+* Step 4: Propose yourself at a scheduled docker meeting in #docker-dev
 
-Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
-You don't have to be a maintainer to make a difference on the project!
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available.  You don't have to be a
+maintainer to make a difference on the project!
+
+### IRC Meetings
+
+There are two monthly meetings taking place on #docker-dev IRC to accommodate all timezones.
+Anybody can ask for a topic to be discussed prior to the meeting.
+
+If you feel the conversation is going off-topic, feel free to point it out.
+
+For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes).
+They also contain all the notes from previous meetings.
+
+## Docker Community Guidelines
+
+We want to keep the Docker community awesome, growing and collaborative. We
+need your help to keep it that way. To help with this we've come up with some
+general guidelines for the community as a whole:
+
+* Be nice: Be courteous, respectful and polite to fellow community members: no
+  regional, racial, gender, or other abuse will be tolerated. We like nice people
+  way better than mean ones!
+
+* Encourage diversity and participation: Make everyone in our community
+  feel welcome, regardless of their background and the extent of their
+  contributions, and do everything possible to encourage participation in
+  our community.
+
+* Keep it legal: Basically, don't get us in trouble. Share only content that
+  you own, do not share private or sensitive information, and don't break the
+  law.
+
+* Stay on topic: Make sure that you are posting to the correct channel
+  and avoid off-topic discussions. Remember when you update an issue or
+  respond to an email you are potentially sending to a large number of
+  people.  Please consider this before you update.  Also remember that
+  nobody likes spam.
+
+### Guideline Violations — 3 Strikes Method
+
+The point of this section is not to find opportunities to punish people, but we
+do need a fair way to deal with people who are making our community suck.
+
+1. First occurrence: We'll give you a friendly, but public reminder that the
+   behavior is inappropriate according to our guidelines.
+
+2. Second occurrence: We will send you a private message with a warning that
+   any additional violations will result in removal from the community.
+
+3. Third occurrence: Depending on the violation, we may need to delete or ban
+   your account.
+
+**Notes:**
+
+* Obvious spammers are banned on first occurrence. If we don't do this, we'll
+  have spam all over the place.
+
+* Violations are forgiven after 6 months of good behavior, and we won't
+  hold a grudge.
+
+* People who commit minor infractions will get some education rather than
+  being put through the 3 strikes process.
+
+* The rules apply equally to everyone in the community, no matter how
+  much you've contributed.
+
+* Extreme violations of a threatening, abusive, destructive or illegal nature
+  will be addressed immediately and are not subject to 3 strikes or
+  forgiveness.
+
+* Contact james@docker.com to report abuse or appeal violations. In the case of
+  appeals, we know that mistakes happen, and we'll work with you to come up with
+  a fair solution if there has been a misunderstanding.
 
diff --git a/Dockerfile b/Dockerfile
index 283e0a3..8f47b0d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,7 +6,7 @@
 # docker build -t docker .
 #
 # # Mount your source in an interactive container for quick testing:
-# docker run -v `pwd`:/go/src/github.com/dotcloud/docker --privileged -i -t docker bash
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
 #
 # # Run the test suite:
 # docker run --privileged docker hack/make.sh test
@@ -28,8 +28,7 @@
 MAINTAINER	Tianon Gravi <admwiggin@gmail.com> (@tianon)
 
 # Packaged dependencies
-RUN	apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
-	apt-utils \
+RUN	apt-get update && apt-get install -y \
 	aufs-tools \
 	automake \
 	btrfs-tools \
@@ -43,7 +42,7 @@
 	libsqlite3-dev \
 	lxc=1.0* \
 	mercurial \
-	pandoc \
+	parallel \
 	reprepro \
 	ruby1.9.1 \
 	ruby1.9.1-dev \
@@ -60,9 +59,10 @@
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
 # Install Go
-RUN	curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz
+RUN	curl -sSL https://golang.org/dl/go1.3.1.src.tar.gz | tar -v -C /usr/local -xz
 ENV	PATH	/usr/local/go/bin:$PATH
-ENV	GOPATH	/go:/go/src/github.com/dotcloud/docker/vendor
+ENV	GOPATH	/go:/go/src/github.com/docker/docker/vendor
+ENV PATH /go/bin:$PATH
 RUN	cd /usr/local/go/src && ./make.bash --no-clean 2>&1
 
 # Compile Go for cross compilation
@@ -80,6 +80,12 @@
 # TODO replace FPM with some very minimal debhelper stuff
 RUN	gem install --no-rdoc --no-ri fpm --version 1.0.2
 
+# Install man page generator
+RUN mkdir -p /go/src/github.com/cpuguy83 \
+    && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
+    && cd /go/src/github.com/cpuguy83/go-md2man \
+    && go get -v ./...
+
 # Get the "busybox" image source so we can build locally instead of pulling
 RUN	git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
 
@@ -94,11 +100,11 @@
 RUN useradd --create-home --gid docker unprivilegeduser
 
 VOLUME	/var/lib/docker
-WORKDIR	/go/src/github.com/dotcloud/docker
+WORKDIR	/go/src/github.com/docker/docker
 ENV	DOCKER_BUILDTAGS	apparmor selinux
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT	["hack/dind"]
 
 # Upload docker source
-ADD	.	/go/src/github.com/dotcloud/docker
+COPY	.	/go/src/github.com/docker/docker
diff --git a/FIXME b/FIXME
deleted file mode 100644
index 4f27d36..0000000
--- a/FIXME
+++ /dev/null
@@ -1,24 +0,0 @@
-
-## FIXME
-
-This file is a loose collection of things to improve in the codebase, for the internal
-use of the maintainers.
-
-They are not big enough to be in the roadmap, not user-facing enough to be github issues,
-and not important enough to be discussed in the mailing list.
-
-They are just like FIXME comments in the source code, except we're not sure where in the source
-to put them - so we put them here :)
-
-
-* Run linter on codebase
-* Unify build commands and regular commands
-* Move source code into src/ subdir for clarity
-* docker build: on non-existent local path for ADD, don't show full absolute path on the host
-* use size header for progress bar in pull
-* Clean up context upload in build!!!
-* Parallel pull
-* Upgrade dockerd without stopping containers
-* Simple command to remove all untagged images (`docker rmi $(docker images | awk '/^<none>/ { print $3 }')`)
-* Simple command to clean up containers for disk space
-* Clean up the ProgressReader api, it's a PITA to use
diff --git a/Makefile b/Makefile
index 2d07b39..40c623a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,8 @@
 .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate
 
 # to allow `make BINDDIR=. shell` or `make BINDDIR= test`
-BINDDIR := bundles
+# (default to no bind mount if DOCKER_HOST is set)
+BINDDIR := $(if $(DOCKER_HOST),,bundles)
 # to allow `make DOCSPORT=9000 docs`
 DOCSPORT := 8000
 
@@ -9,7 +10,7 @@
 GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
 DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
-DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")
+DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
 
 DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
 # to allow `make DOCSDIR=docs docs-shell`
@@ -33,7 +34,7 @@
 	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
 
 docs-release: docs-build
-	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh
+	$(DOCKER_RUN_DOCS) -e BUILD_ROOT "$(DOCKER_DOCS_IMAGE)" ./release.sh
 
 test: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli
diff --git a/README.md b/README.md
index 3c378de..857cd3c 100644
--- a/README.md
+++ b/README.md
@@ -131,9 +131,8 @@
 
 ```bash
 FROM ubuntu:12.04
-RUN apt-get update
-RUN apt-get install -q -y python python-pip curl
-RUN curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
+RUN apt-get update && apt-get install -y python python-pip curl
+RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
 RUN cd helloflask-master && pip install -r requirements.txt
 ```
 
@@ -178,6 +177,9 @@
 Contributing to Docker
 ======================
 
+[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker)
+[![Travis](https://travis-ci.org/docker/docker.svg?branch=master)](https://travis-ci.org/docker/docker)
+
 Want to hack on Docker? Awesome! There are instructions to get you
 started [here](CONTRIBUTING.md).
 
diff --git a/VERSION b/VERSION
index 45a1b3f..fe00fb1 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.1.2
+1.1.2-dev
\ No newline at end of file
diff --git a/api/README.md b/api/README.md
index 3ef33f8..453f61a 100644
--- a/api/README.md
+++ b/api/README.md
@@ -1,5 +1,5 @@
 This directory contains code pertaining to the Docker API:
 
- - Used by the docker client when comunicating with the docker deamon
+ - Used by the docker client when communicating with the docker daemon
 
- - Used by third party tools wishing to interface with the docker deamon
+ - Used by third party tools wishing to interface with the docker daemon
diff --git a/api/client/cli.go b/api/client/cli.go
index bb5d191..d80f9cc 100644
--- a/api/client/cli.go
+++ b/api/client/cli.go
@@ -10,11 +10,24 @@
 	"strings"
 	"text/template"
 
-	flag "github.com/dotcloud/docker/pkg/mflag"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/registry"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/registry"
 )
 
+type DockerCli struct {
+	proto      string
+	addr       string
+	configFile *registry.ConfigFile
+	in         io.ReadCloser
+	out        io.Writer
+	err        io.Writer
+	isTerminal bool
+	terminalFd uintptr
+	tlsConfig  *tls.Config
+	scheme     string
+}
+
 var funcMap = template.FuncMap{
 	"json": func(v interface{}) string {
 		a, _ := json.Marshal(v)
@@ -34,7 +47,8 @@
 	return method.Interface().(func(...string) error), true
 }
 
-func (cli *DockerCli) ParseCommands(args ...string) error {
+// Cmd executes the specified command
+func (cli *DockerCli) Cmd(args ...string) error {
 	if len(args) > 0 {
 		method, exists := cli.getMethod(args[0])
 		if !exists {
@@ -97,16 +111,3 @@
 		scheme:     scheme,
 	}
 }
-
-type DockerCli struct {
-	proto      string
-	addr       string
-	configFile *registry.ConfigFile
-	in         io.ReadCloser
-	out        io.Writer
-	err        io.Writer
-	isTerminal bool
-	terminalFd uintptr
-	tlsConfig  *tls.Config
-	scheme     string
-}
diff --git a/api/client/commands.go b/api/client/commands.go
index df2125f..81b0668 100644
--- a/api/client/commands.go
+++ b/api/client/commands.go
@@ -22,19 +22,21 @@
 	"text/template"
 	"time"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/opts"
-	"github.com/dotcloud/docker/pkg/signal"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/pkg/units"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/utils/filters"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/filters"
+	"github.com/docker/docker/pkg/signal"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/pkg/units"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 const (
@@ -67,7 +69,8 @@
 		{"inspect", "Return low-level information on a container"},
 		{"kill", "Kill a running container"},
 		{"load", "Load an image from a tar archive"},
-		{"login", "Register or log in to the Docker registry server"},
+		{"login", "Register or log in to a Docker registry server"},
+		{"logout", "Log out from a Docker registry server"},
 		{"logs", "Fetch the logs of a container"},
 		{"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
 		{"pause", "Pause all processes within a container"},
@@ -161,28 +164,32 @@
 		if _, err = os.Stat(filename); os.IsNotExist(err) {
 			return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
 		}
-		if err = utils.ValidateContextDirectory(root); err != nil {
+		var excludes []string
+		ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
+		if err != nil && !os.IsNotExist(err) {
+			return fmt.Errorf("Error reading .dockerignore: '%s'", err)
+		}
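+		// Collect the exclude patterns, rejecting any pattern that would
+		// exclude the Dockerfile itself from the build context.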
+		for _, pattern := range strings.Split(string(ignore), "\n") {
+			ok, err := filepath.Match(pattern, "Dockerfile")
+			if err != nil {
+				return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err)
+			}
+			if ok {
+				return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern)
+			}
+			excludes = append(excludes, pattern)
+		}
+		if err = utils.ValidateContextDirectory(root, excludes); err != nil {
 			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
 		}
 		options := &archive.TarOptions{
 			Compression: archive.Uncompressed,
-		}
-		if ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")); err != nil && !os.IsNotExist(err) {
-			return fmt.Errorf("Error reading .dockerignore: '%s'", err)
-		} else if err == nil {
-			for _, pattern := range strings.Split(string(ignore), "\n") {
-				ok, err := filepath.Match(pattern, "Dockerfile")
-				if err != nil {
-					utils.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err)
-					continue
-				}
-				if ok {
-					return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern)
-				}
-				options.Excludes = append(options.Excludes, pattern)
-			}
+			Excludes:    excludes,
 		}
 		context, err = archive.TarWithOptions(root, options)
+		if err != nil {
+			return err
+		}
 	}
 	var body io.Reader
 	// Setup an upload progress bar
@@ -196,7 +203,7 @@
 
 	//Check if the given image name can be resolved
 	if *tag != "" {
-		repository, _ := utils.ParseRepositoryTag(*tag)
+		repository, _ := parsers.ParseRepositoryTag(*tag)
 		if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
 			return err
 		}
@@ -349,6 +356,32 @@
 	return nil
 }
 
+// log out from a Docker registry
+func (cli *DockerCli) CmdLogout(args ...string) error {
+	cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
+
+	if err := cmd.Parse(args); err != nil {
+		return nil
+	}
+	serverAddress := registry.IndexServerAddress()
+	if len(cmd.Args()) > 0 {
+		serverAddress = cmd.Arg(0)
+	}
+
+	cli.LoadConfigFile()
+	if _, ok := cli.configFile.Configs[serverAddress]; !ok {
+		fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress)
+	} else {
+		fmt.Fprintf(cli.out, "Removing login credentials for %s\n", serverAddress)
+		delete(cli.configFile.Configs, serverAddress)
+
+		if err := registry.SaveConfig(cli.configFile); err != nil {
+			return fmt.Errorf("Failed to save docker config: %v", err)
+		}
+	}
+	return nil
+}
+
 // 'docker wait': block until a container stops
 func (cli *DockerCli) CmdWait(args ...string) error {
 	cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
@@ -391,6 +424,7 @@
 	if dockerversion.GITCOMMIT != "" {
 		fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
 	}
+	fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)
 
 	body, _, err := readBody(cli.call("GET", "/version", nil, false))
 	if err != nil {
@@ -400,11 +434,11 @@
 	out := engine.NewOutput()
 	remoteVersion, err := out.AddEnv()
 	if err != nil {
-		utils.Errorf("Error reading remote version: %s\n", err)
+		log.Errorf("Error reading remote version: %s", err)
 		return err
 	}
 	if _, err := out.Write(body); err != nil {
-		utils.Errorf("Error reading remote version: %s\n", err)
+		log.Errorf("Error reading remote version: %s", err)
 		return err
 	}
 	out.Close()
@@ -440,7 +474,7 @@
 	}
 
 	if _, err := out.Write(body); err != nil {
-		utils.Errorf("Error reading remote info: %s\n", err)
+		log.Errorf("Error reading remote info: %s", err)
 		return err
 	}
 	out.Close()
@@ -457,6 +491,7 @@
 	}
 	fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
 	fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
+	fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem"))
 
 	if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
 		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
@@ -471,9 +506,6 @@
 		if initPath := remoteInfo.Get("InitPath"); initPath != "" {
 			fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
 		}
-		if len(remoteInfo.GetList("Sockets")) != 0 {
-			fmt.Fprintf(cli.out, "Sockets: %v\n", remoteInfo.GetList("Sockets"))
-		}
 	}
 
 	if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
@@ -551,7 +583,7 @@
 }
 
 func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
-	sigc := make(chan os.Signal, 1)
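+	// Buffer the channel generously so bursts of forwarded signals are not
+	// dropped while earlier ones are still being delivered to the daemon.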
+	sigc := make(chan os.Signal, 128)
 	signal.CatchAll(sigc)
 	go func() {
 		for s := range sigc {
@@ -566,10 +598,10 @@
 				}
 			}
 			if sig == "" {
-				utils.Errorf("Unsupported signal: %d. Discarding.", s)
+				log.Errorf("Unsupported signal: %d. Discarding.", s)
 			}
 			if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil {
-				utils.Debugf("Error sending signal: %s", err)
+				log.Debugf("Error sending signal: %s", err)
 			}
 		}
 	}()
@@ -659,7 +691,7 @@
 	if *openStdin || *attach {
 		if tty && cli.isTerminal {
 			if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
-				utils.Errorf("Error monitoring TTY size: %s\n", err)
+				log.Errorf("Error monitoring TTY size: %s", err)
 			}
 		}
 		return <-cErr
@@ -982,7 +1014,7 @@
 	cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
 	v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
 	link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
-	force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container")
+	force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
 
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -991,6 +1023,7 @@
 		cmd.Usage()
 		return nil
 	}
+
 	val := url.Values{}
 	if *v {
 		val.Set("v", "1")
@@ -998,6 +1031,7 @@
 	if *link {
 		val.Set("link", "1")
 	}
+
 	if *force {
 		val.Set("force", "1")
 	}
@@ -1051,16 +1085,19 @@
 		return nil
 	}
 
-	var src, repository, tag string
+	var (
+		v          = url.Values{}
+		src        = cmd.Arg(0)
+		repository = cmd.Arg(1)
+	)
+
+	v.Set("fromSrc", src)
+	v.Set("repo", repository)
 
 	if cmd.NArg() == 3 {
 		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
-		src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
-	} else {
-		src = cmd.Arg(0)
-		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+		v.Set("tag", cmd.Arg(2))
 	}
-	v := url.Values{}
 
 	if repository != "" {
 		//Check if the given image name can be resolved
@@ -1069,10 +1106,6 @@
 		}
 	}
 
-	v.Set("repo", repository)
-	v.Set("tag", tag)
-	v.Set("fromSrc", src)
-
 	var in io.Reader
 
 	if src == "-" {
@@ -1096,7 +1129,7 @@
 
 	cli.LoadConfigFile()
 
-	remote, tag := utils.ParseRepositoryTag(name)
+	remote, tag := parsers.ParseRepositoryTag(name)
 
 	// Resolve the Repository name from fqn to hostname + name
 	hostname, _, err := registry.ResolveRepositoryName(remote)
@@ -1158,12 +1191,18 @@
 		cmd.Usage()
 		return nil
 	}
+	var (
+		v      = url.Values{}
+		remote = cmd.Arg(0)
+	)
 
-	remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
+	v.Set("fromImage", remote)
+
 	if *tag == "" {
-		*tag = parsedTag
+		v.Set("tag", *tag)
 	}
 
+	remote, _ = parsers.ParseRepositoryTag(remote)
 	// Resolve the Repository name from fqn to hostname + name
 	hostname, _, err := registry.ResolveRepositoryName(remote)
 	if err != nil {
@@ -1174,9 +1213,6 @@
 
 	// Resolve the Auth config relevant for this server
 	authConfig := cli.configFile.ResolveAuthConfig(hostname)
-	v := url.Values{}
-	v.Set("fromImage", remote)
-	v.Set("tag", *tag)
 
 	pull := func(authConfig registry.AuthConfig) error {
 		buf, err := json.Marshal(authConfig)
@@ -1216,7 +1252,7 @@
 	flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
 	flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
 
-	var flFilter opts.ListOpts
+	flFilter := opts.NewListOpts(nil)
 	cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (e.g. 'dangling=true')")
 
 	if err := cmd.Parse(args); err != nil {
@@ -1349,7 +1385,7 @@
 		for _, out := range outs.Data {
 			for _, repotag := range out.GetList("RepoTags") {
 
-				repo, tag := utils.ParseRepositoryTag(repotag)
+				repo, tag := parsers.ParseRepositoryTag(repotag)
 				outID := out.Get("Id")
 				if !*noTrunc {
 					outID = utils.TruncateID(outID)
@@ -1449,6 +1485,9 @@
 	before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only containers created before Id or Name, including non-running ones.")
 	last := cmd.Int([]string{"n"}, -1, "Show the n last created containers, including non-running ones.")
 
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values. Valid filters:\nexited=<int> - containers with exit code of <int>")
+
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1472,6 +1511,24 @@
 		v.Set("size", "1")
 	}
 
+	// Consolidate all filter flags, and sanity check them.
+	// They'll get processed in the daemon/server.
+	psFilterArgs := filters.Args{}
+	for _, f := range flFilter.GetAll() {
+		var err error
+		psFilterArgs, err = filters.ParseFlag(f, psFilterArgs)
+		if err != nil {
+			return err
+		}
+	}
+	if len(psFilterArgs) > 0 {
+		filterJson, err := filters.ToParam(psFilterArgs)
+		if err != nil {
+			return err
+		}
+		v.Set("filters", filterJson)
+	}
+
 	body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false))
 	if err != nil {
 		return err
@@ -1511,6 +1568,7 @@
 				outCommand = out.Get("Command")
 				ports      = engine.NewTable("", 0)
 			)
+			outCommand = strconv.Quote(outCommand)
 			if !*noTrunc {
 				outCommand = utils.Trunc(outCommand, 20)
 			}
@@ -1549,7 +1607,7 @@
 
 	var (
 		name            = cmd.Arg(0)
-		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
 	)
 
 	if name == "" || len(cmd.Args()) > 2 {
@@ -1614,7 +1672,7 @@
 		loc = time.FixedZone(time.Now().Zone())
 	)
 	var setTime = func(key, value string) {
-		format := "2006-01-02 15:04:05 -0700 MST"
+		format := time.RFC3339Nano
 		if len(value) < len(format) {
 			format = format[:len(value)]
 		}
@@ -1736,7 +1794,7 @@
 	var (
 		cmd     = cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
 		noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
-		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied.")
+		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.")
 	)
 
 	if err := cmd.Parse(args); err != nil {
@@ -1770,7 +1828,7 @@
 
 	if tty && cli.isTerminal {
 		if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
-			utils.Debugf("Error monitoring TTY size: %s", err)
+			log.Debugf("Error monitoring TTY size: %s", err)
 		}
 	}
 
@@ -1862,7 +1920,7 @@
 type ports []int
 
 func (cli *DockerCli) CmdTag(args ...string) error {
-	cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository")
+	cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository")
 	force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -1873,7 +1931,7 @@
 	}
 
 	var (
-		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
+		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
 		v               = url.Values{}
 	)
 
@@ -1894,6 +1952,41 @@
 	return nil
 }
 
+func (cli *DockerCli) pullImage(image string) error {
+	v := url.Values{}
+	repos, tag := parsers.ParseRepositoryTag(image)
+	// pull only the image tagged 'latest' if no tag was specified
+	if tag == "" {
+		tag = "latest"
+	}
+	v.Set("fromImage", repos)
+	v.Set("tag", tag)
+
+	// Resolve the Repository name from fqn to hostname + name
+	hostname, _, err := registry.ResolveRepositoryName(repos)
+	if err != nil {
+		return err
+	}
+
+	// Load the auth config file, to be able to pull the image
+	cli.LoadConfigFile()
+
+	// Resolve the Auth config relevant for this server
+	authConfig := cli.configFile.ResolveAuthConfig(hostname)
+	buf, err := json.Marshal(authConfig)
+	if err != nil {
+		return err
+	}
+
+	registryAuthHeader := []string{
+		base64.URLEncoding.EncodeToString(buf),
+	}
+	if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (cli *DockerCli) CmdRun(args ...string) error {
 	// FIXME: just use runconfig.Parse already
 	config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil)
@@ -1955,37 +2048,10 @@
 	if statusCode == 404 {
 		fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image)
 
-		v := url.Values{}
-		repos, tag := utils.ParseRepositoryTag(config.Image)
-		// pull only the image tagged 'latest' if no tag was specified
-		if tag == "" {
-			tag = "latest"
-		}
-		v.Set("fromImage", repos)
-		v.Set("tag", tag)
-
-		// Resolve the Repository name from fqn to hostname + name
-		hostname, _, err := registry.ResolveRepositoryName(repos)
-		if err != nil {
+		if err = cli.pullImage(config.Image); err != nil {
 			return err
 		}
-
-		// Load the auth config file, to be able to pull the image
-		cli.LoadConfigFile()
-
-		// Resolve the Auth config relevant for this server
-		authConfig := cli.configFile.ResolveAuthConfig(hostname)
-		buf, err := json.Marshal(authConfig)
-		if err != nil {
-			return err
-		}
-
-		registryAuthHeader := []string{
-			base64.URLEncoding.EncodeToString(buf),
-		}
-		if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
-			return err
-		}
+		// Retry
 		if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil {
 			return err
 		}
@@ -2033,9 +2099,9 @@
 
 	// Block the return until the chan gets closed
 	defer func() {
-		utils.Debugf("End of CmdRun(), Waiting for hijack to finish.")
+		log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
 		if _, ok := <-hijacked; ok {
-			utils.Errorf("Hijack did not finish (chan still open)")
+			log.Errorf("Hijack did not finish (chan still open)")
 		}
 	}()
 
@@ -2081,7 +2147,7 @@
 		}
 	case err := <-errCh:
 		if err != nil {
-			utils.Debugf("Error hijack: %s", err)
+			log.Debugf("Error hijack: %s", err)
 			return err
 		}
 	}
@@ -2093,13 +2159,13 @@
 
 	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
 		if err := cli.monitorTtySize(runResult.Get("Id")); err != nil {
-			utils.Errorf("Error monitoring TTY size: %s\n", err)
+			log.Errorf("Error monitoring TTY size: %s", err)
 		}
 	}
 
 	if errCh != nil {
 		if err := <-errCh; err != nil {
-			utils.Debugf("Error hijack: %s", err)
+			log.Debugf("Error hijack: %s", err)
 			return err
 		}
 	}
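
A note on the sigc change above: os/signal delivery is non-blocking, so any signal that arrives while the channel is full is silently dropped; deepening the buffer from 1 to 128 makes bursts of forwarded signals far less lossy. A minimal standalone sketch of the same pattern, using the standard library's signal.Notify in place of Docker's signal.CatchAll wrapper:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
    )

    func main() {
        // Buffered, as in forwardAllSignals: a full channel means dropped
        // signals, since the runtime will not block to deliver them.
        sigc := make(chan os.Signal, 128)
        signal.Notify(sigc) // no arguments: relay every incoming signal
        for s := range sigc {
            fmt.Printf("would forward signal: %v\n", s)
        }
    }
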
diff --git a/api/client/hijack.go b/api/client/hijack.go
index 0a9d5d8..ba6ebfb 100644
--- a/api/client/hijack.go
+++ b/api/client/hijack.go
@@ -11,10 +11,11 @@
 	"runtime"
 	"strings"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/utils"
 )
 
 func (cli *DockerCli) dial() (net.Conn, error) {
@@ -88,12 +89,12 @@
 			}()
 
 			// When TTY is ON, use regular copy
-			if setRawTerminal {
+			if setRawTerminal && stdout != nil {
 				_, err = io.Copy(stdout, br)
 			} else {
 				_, err = utils.StdCopy(stdout, stderr, br)
 			}
-			utils.Debugf("[hijack] End of stdout")
+			log.Debugf("[hijack] End of stdout")
 			return err
 		})
 	}
@@ -101,15 +102,15 @@
 	sendStdin := utils.Go(func() error {
 		if in != nil {
 			io.Copy(rwc, in)
-			utils.Debugf("[hijack] End of stdin")
+			log.Debugf("[hijack] End of stdin")
 		}
 		if tcpc, ok := rwc.(*net.TCPConn); ok {
 			if err := tcpc.CloseWrite(); err != nil {
-				utils.Debugf("Couldn't send EOF: %s\n", err)
+				log.Debugf("Couldn't send EOF: %s", err)
 			}
 		} else if unixc, ok := rwc.(*net.UnixConn); ok {
 			if err := unixc.CloseWrite(); err != nil {
-				utils.Debugf("Couldn't send EOF: %s\n", err)
+				log.Debugf("Couldn't send EOF: %s", err)
 			}
 		}
 		// Discard errors due to pipe interruption
@@ -118,14 +119,14 @@
 
 	if stdout != nil || stderr != nil {
 		if err := <-receiveStdout; err != nil {
-			utils.Debugf("Error receiveStdout: %s", err)
+			log.Debugf("Error receiveStdout: %s", err)
 			return err
 		}
 	}
 
 	if !cli.isTerminal {
 		if err := <-sendStdin; err != nil {
-			utils.Debugf("Error sendStdin: %s", err)
+			log.Debugf("Error sendStdin: %s", err)
 			return err
 		}
 	}
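
The new stdout != nil guard above avoids a panic: io.Copy would call Write on a nil interface when the caller attached no stdout, while the non-TTY branch goes through utils.StdCopy, which demultiplexes the stream into stdout and stderr frames. A hedged sketch of just that branch, where demux is a stand-in with the same shape as StdCopy:

    package main

    import (
        "io"
        "os"
        "strings"
    )

    // copyOutput mirrors the guarded branch in hijack: with a raw TTY the
    // stream is a plain byte stream and can be copied straight through, but
    // only if a destination writer was actually attached (stdout may be nil).
    func copyOutput(setRawTerminal bool, stdout, stderr io.Writer, br io.Reader,
        demux func(io.Writer, io.Writer, io.Reader) (int64, error)) error {
        var err error
        if setRawTerminal && stdout != nil {
            _, err = io.Copy(stdout, br) // TTY: raw pass-through
        } else {
            _, err = demux(stdout, stderr, br) // non-TTY: split stdout/stderr frames
        }
        return err
    }

    func main() {
        demux := func(o, e io.Writer, r io.Reader) (int64, error) { return io.Copy(o, r) }
        copyOutput(true, os.Stdout, os.Stderr, strings.NewReader("hello\n"), demux)
    }
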
diff --git a/api/client/utils.go b/api/client/utils.go
index 13b5241..e4ef8d3 100644
--- a/api/client/utils.go
+++ b/api/client/utils.go
@@ -17,12 +17,13 @@
 	"strings"
 	"syscall"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
 )
 
 var (
@@ -165,7 +166,7 @@
 		} else {
 			_, err = utils.StdCopy(stdout, stderr, resp.Body)
 		}
-		utils.Debugf("[stream] End of stdout")
+		log.Debugf("[stream] End of stdout")
 		return err
 	}
 	return nil
@@ -180,7 +181,7 @@
 	v.Set("h", strconv.Itoa(height))
 	v.Set("w", strconv.Itoa(width))
 	if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
-		utils.Debugf("Error resize: %s", err)
+		log.Debugf("Error resize: %s", err)
 	}
 }
 
@@ -237,7 +238,7 @@
 	}
 	ws, err := term.GetWinsize(cli.terminalFd)
 	if err != nil {
-		utils.Debugf("Error getting size: %s", err)
+		log.Debugf("Error getting size: %s", err)
 		if ws == nil {
 			return 0, 0
 		}
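
For reference, the resize helper above just serializes the new terminal dimensions as query parameters; url.Values.Encode sorts keys, so the generated path is deterministic. A tiny self-contained illustration (the container ID is made up):

    package main

    import (
        "fmt"
        "net/url"
        "strconv"
    )

    func main() {
        v := url.Values{}
        v.Set("h", strconv.Itoa(24))
        v.Set("w", strconv.Itoa(80))
        // Encode sorts keys alphabetically: h before w.
        fmt.Println("/containers/abc123/resize?" + v.Encode())
    }
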
diff --git a/api/common.go b/api/common.go
index e737050..5cc33a9 100644
--- a/api/common.go
+++ b/api/common.go
@@ -5,19 +5,20 @@
 	"mime"
 	"strings"
 
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/version"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/version"
 )
 
 const (
-	APIVERSION        version.Version = "1.13"
+	APIVERSION        version.Version = "1.14"
 	DEFAULTHTTPHOST                   = "127.0.0.1"
 	DEFAULTUNIXSOCKET                 = "/var/run/docker.sock"
 )
 
 func ValidateHost(val string) (string, error) {
-	host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
+	host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
 	if err != nil {
 		return val, err
 	}
@@ -42,7 +43,7 @@
 func MatchesContentType(contentType, expectedType string) bool {
 	mimetype, _, err := mime.ParseMediaType(contentType)
 	if err != nil {
-		utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
+		log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
 	}
 	return err == nil && mimetype == expectedType
 }
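
MatchesContentType above leans on mime.ParseMediaType to strip parameters such as "; charset=utf-8" before comparing, so clients that send a charset still match. A minimal reproduction of that behaviour:

    package main

    import (
        "fmt"
        "mime"
    )

    func matches(contentType, expected string) bool {
        // ParseMediaType drops parameters like "; charset=utf-8".
        mimetype, _, err := mime.ParseMediaType(contentType)
        return err == nil && mimetype == expected
    }

    func main() {
        fmt.Println(matches("application/json; charset=utf-8", "application/json")) // true
        fmt.Println(matches("text/plain", "application/json"))                      // false
    }
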
diff --git a/api/server/MAINTAINERS b/api/server/MAINTAINERS
index c92a061..310a159 100644
--- a/api/server/MAINTAINERS
+++ b/api/server/MAINTAINERS
@@ -1,2 +1,3 @@
 Victor Vieux <vieux@docker.com> (@vieux)
-Johan Euphrosine <proppy@google.com> (@proppy)
+# off the grid until september
+# Johan Euphrosine <proppy@google.com> (@proppy)
diff --git a/api/server/server.go b/api/server/server.go
index b3a0590..96f5bca 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -11,7 +11,6 @@
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"net"
 	"net/http"
 	"net/http/pprof"
@@ -21,16 +20,18 @@
 	"syscall"
 
 	"code.google.com/p/go.net/websocket"
-
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/listenbuffer"
-	"github.com/dotcloud/docker/pkg/systemd"
-	"github.com/dotcloud/docker/pkg/user"
-	"github.com/dotcloud/docker/pkg/version"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/libcontainer/user"
 	"github.com/gorilla/mux"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/listenbuffer"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/systemd"
+	"github.com/docker/docker/pkg/version"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
 )
 
 var (
@@ -87,7 +88,7 @@
 	}
 
 	if err != nil {
-		utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
+		log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
 		http.Error(w, err.Error(), statusCode)
 	}
 }
@@ -237,10 +238,10 @@
 		outsLegacy := engine.NewTable("Created", 0)
 		for _, out := range outs.Data {
 			for _, repoTag := range out.GetList("RepoTags") {
-				parts := strings.Split(repoTag, ":")
+				repo, tag := parsers.ParseRepositoryTag(repoTag)
 				outLegacy := &engine.Env{}
-				outLegacy.Set("Repository", parts[0])
-				outLegacy.Set("Tag", parts[1])
+				outLegacy.Set("Repository", repo)
+				outLegacy.SetJson("Tag", tag)
 				outLegacy.Set("Id", out.Get("Id"))
 				outLegacy.SetInt64("Created", out.GetInt64("Created"))
 				outLegacy.SetInt64("Size", out.GetInt64("Size"))
@@ -301,7 +302,7 @@
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	var job = eng.Job("changes", vars["name"])
+	var job = eng.Job("container_changes", vars["name"])
 	streamJSON(job, w, false)
 
 	return job.Run()
@@ -338,6 +339,7 @@
 	job.Setenv("since", r.Form.Get("since"))
 	job.Setenv("before", r.Form.Get("before"))
 	job.Setenv("limit", r.Form.Get("limit"))
+	job.Setenv("filters", r.Form.Get("filters"))
 
 	if version.GreaterThanOrEqualTo("1.5") {
 		streamJSON(job, w, false)
@@ -437,7 +439,7 @@
 		stdoutBuffer = bytes.NewBuffer(nil)
 	)
 	if err := config.Decode(r.Body); err != nil {
-		utils.Errorf("%s", err)
+		log.Errorf("%s", err)
 	}
 
 	if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
@@ -468,6 +470,7 @@
 
 	var (
 		image = r.Form.Get("fromImage")
+		repo  = r.Form.Get("repo")
 		tag   = r.Form.Get("tag")
 		job   *engine.Job
 	)
@@ -482,18 +485,24 @@
 		}
 	}
 	if image != "" { //pull
+		if tag == "" {
+			image, tag = parsers.ParseRepositoryTag(image)
+		}
 		metaHeaders := map[string][]string{}
 		for k, v := range r.Header {
 			if strings.HasPrefix(k, "X-Meta-") {
 				metaHeaders[k] = v
 			}
 		}
-		job = eng.Job("pull", r.Form.Get("fromImage"), tag)
+		job = eng.Job("pull", image, tag)
 		job.SetenvBool("parallel", version.GreaterThan("1.3"))
 		job.SetenvJson("metaHeaders", metaHeaders)
 		job.SetenvJson("authConfig", authConfig)
 	} else { //import
-		job = eng.Job("import", r.Form.Get("fromSrc"), r.Form.Get("repo"), tag)
+		if tag == "" {
+			repo, tag = parsers.ParseRepositoryTag(repo)
+		}
+		job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
 		job.Stdin.Add(r.Body)
 	}
 
@@ -670,10 +679,12 @@
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	job := eng.Job("container_delete", vars["name"])
+	job := eng.Job("delete", vars["name"])
+
+	job.Setenv("forceRemove", r.Form.Get("force"))
+
 	job.Setenv("removeVolume", r.Form.Get("v"))
 	job.Setenv("removeLink", r.Form.Get("link"))
-	job.Setenv("forceRemove", r.Form.Get("force"))
 	if err := job.Run(); err != nil {
 		return err
 	}
@@ -706,13 +717,16 @@
 	)
 
 	// allow a nil body for backwards compatibility
-	if r.Body != nil {
-		if api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
-			if err := job.DecodeEnv(r.Body); err != nil {
-				return err
-			}
+	if r.Body != nil && r.ContentLength > 0 {
+		if !api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
+			return fmt.Errorf("Content-Type of application/json is required")
+		}
+
+		if err := job.DecodeEnv(r.Body); err != nil {
+			return err
 		}
 	}
+
 	if err := job.Run(); err != nil {
 		if err.Error() == "Container already started" {
 			w.WriteHeader(http.StatusNotModified)
@@ -864,7 +878,7 @@
 		job.Stdout.Add(ws)
 		job.Stderr.Set(ws)
 		if err := job.Run(); err != nil {
-			utils.Errorf("Error attaching websocket: %s", err)
+			log.Errorf("Error attaching websocket: %s", err)
 		}
 	})
 	h.ServeHTTP(w, r)
@@ -991,7 +1005,7 @@
 	job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
 	job.Stdout.Add(w)
 	if err := job.Run(); err != nil {
-		utils.Errorf("%s", err.Error())
+		log.Errorf("%s", err.Error())
 		if strings.Contains(err.Error(), "No such container") {
 			w.WriteHeader(http.StatusNotFound)
 		} else if strings.Contains(err.Error(), "no such file or directory") {
@@ -1019,16 +1033,16 @@
 func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		// log the request
-		utils.Debugf("Calling %s %s", localMethod, localRoute)
+		log.Debugf("Calling %s %s", localMethod, localRoute)
 
 		if logging {
-			log.Println(r.Method, r.RequestURI)
+			log.Infof("%s %s", r.Method, r.RequestURI)
 		}
 
 		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
 			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
 			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
-				utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
+				log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
 			}
 		}
 		version := version.Version(mux.Vars(r)["version"])
@@ -1045,7 +1059,7 @@
 		}
 
 		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
-			utils.Errorf("Error making handler: %s", err)
+			log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
 			httpError(w, err)
 		}
 	}
@@ -1134,7 +1148,7 @@
 
 	for method, routes := range m {
 		for route, fct := range routes {
-			utils.Debugf("Registering %s, %s", method, route)
+			log.Debugf("Registering %s, %s", method, route)
 			// NOTE: scope issue, make sure the variables are local and won't be changed
 			localRoute := route
 			localFct := fct
@@ -1181,7 +1195,7 @@
 	chErrors := make(chan error, len(ls))
 
 	// We don't want to start serving on these sockets until the
-	// "initserver" job has completed. Otherwise required handlers
+	// daemon is initialized and installed. Otherwise required handlers
 	// won't be ready.
 	<-activationLock
 
@@ -1224,7 +1238,7 @@
 		return err
 	}
 
-	utils.Debugf("%s group found. gid: %d", nameOrGid, gid)
+	log.Debugf("%s group found. gid: %d", nameOrGid, gid)
 	return os.Chown(addr, 0, gid)
 }
 
@@ -1295,7 +1309,7 @@
 	switch proto {
 	case "tcp":
 		if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
-			log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
+			log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 		}
 	case "unix":
 		socketGroup := job.Getenv("SocketGroup")
@@ -1303,7 +1317,7 @@
 			if err := changeGroup(addr, socketGroup); err != nil {
 				if socketGroup == "docker" {
 					// if the user hasn't explicitly specified the group ownership, don't fail on errors.
-					utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
+					log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
 				} else {
 					return err
 				}
@@ -1338,7 +1352,7 @@
 			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
 		}
 		go func() {
-			log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
+			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
 			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
 		}()
 	}
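
The body check added to the start handler above keeps the old allowance for an empty body but now rejects any non-empty body that is not declared as JSON. Sketched out of context below; matchesContentType is a local stand-in for api.MatchesContentType:

    package main

    import (
        "fmt"
        "mime"
        "net/http"
    )

    func matchesContentType(contentType, expected string) bool {
        mimetype, _, err := mime.ParseMediaType(contentType)
        return err == nil && mimetype == expected
    }

    // checkStartBody mirrors the guard above: nil or zero-length bodies stay
    // legal for old clients, everything else must be application/json before
    // it is decoded into the job environment.
    func checkStartBody(r *http.Request) error {
        if r.Body != nil && r.ContentLength > 0 {
            if !matchesContentType(r.Header.Get("Content-Type"), "application/json") {
                return fmt.Errorf("Content-Type of application/json is required")
            }
        }
        return nil
    }

    func main() {
        r, _ := http.NewRequest("POST", "/containers/foo/start", nil)
        fmt.Println(checkStartBody(r)) // <nil>: an empty body is allowed
    }
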
diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go
index 2d14f89..950fea5 100644
--- a/api/server/server_unit_test.go
+++ b/api/server/server_unit_test.go
@@ -7,11 +7,13 @@
 	"io"
 	"net/http"
 	"net/http/httptest"
+	"reflect"
 	"strings"
 	"testing"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/engine"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/version"
 )
 
 func TestGetBoolParam(t *testing.T) {
@@ -111,8 +113,105 @@
 	if v.GetInt("Containers") != 1 {
 		t.Fatalf("%#v\n", v)
 	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
+	assertContentType(r, "application/json", t)
+}
+
+func TestGetImagesJSON(t *testing.T) {
+	eng := engine.New()
+	var called bool
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		called = true
+		v := createEnvFromGetImagesJSONStruct(sampleImage)
+		if _, err := v.WriteTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+	r := serveRequest("GET", "/images/json", nil, eng, t)
+	if !called {
+		t.Fatal("handler was not called")
+	}
+	assertHttpNotError(r, t)
+	assertContentType(r, "application/json", t)
+	var observed getImagesJSONStruct
+	if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(observed, sampleImage) {
+		t.Errorf("Expected %#v but got %#v", sampleImage, observed)
+	}
+}
+
+func TestGetImagesJSONFilter(t *testing.T) {
+	eng := engine.New()
+	filter := "nothing"
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		filter = job.Getenv("filter")
+		return engine.StatusOK
+	})
+	serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
+	if filter != "aaaa" {
+		t.Errorf("%#v", filter)
+	}
+}
+
+func TestGetImagesJSONFilters(t *testing.T) {
+	eng := engine.New()
+	filter := "nothing"
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		filter = job.Getenv("filters")
+		return engine.StatusOK
+	})
+	serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
+	if filter != "nnnn" {
+		t.Errorf("%#v", filter)
+	}
+}
+
+func TestGetImagesJSONAll(t *testing.T) {
+	eng := engine.New()
+	allFilter := "-1"
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		allFilter = job.Getenv("all")
+		return engine.StatusOK
+	})
+	serveRequest("GET", "/images/json?all=1", nil, eng, t)
+	if allFilter != "1" {
+		t.Errorf("%#v", allFilter)
+	}
+}
+
+func TestGetImagesJSONLegacyFormat(t *testing.T) {
+	eng := engine.New()
+	var called bool
+	eng.Register("images", func(job *engine.Job) engine.Status {
+		called = true
+		outsLegacy := engine.NewTable("Created", 0)
+		outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
+		if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+	r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
+	if !called {
+		t.Fatal("handler was not called")
+	}
+	assertHttpNotError(r, t)
+	assertContentType(r, "application/json", t)
+	images := engine.NewTable("Created", 0)
+	if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil {
+		t.Fatal(err)
+	}
+	if images.Len() != 1 {
+		t.Fatalf("Expected 1 image, %d found", images.Len())
+	}
+	image := images.Data[0]
+	if image.Get("Tag") != "test-tag" {
+		t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag"))
+	}
+	if image.Get("Repository") != "test-name" {
+		t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository"))
 	}
 }
 
@@ -123,12 +222,12 @@
 	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
 		called = true
 		if job.Args[0] != name {
-			t.Fatalf("name != '%s': %#v", name, job.Args[0])
+			t.Errorf("name != '%s': %#v", name, job.Args[0])
 		}
 		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
-			t.Fatal("dirty env variable not set")
+			t.Errorf("dirty env variable not set")
 		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
-			t.Fatal("dirty env variable set when it shouldn't")
+			t.Errorf("dirty env variable set when it shouldn't")
 		}
 		v := &engine.Env{}
 		v.SetBool("dirty", true)
@@ -141,9 +240,7 @@
 	if !called {
 		t.Fatal("handler was not called")
 	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
-	}
+	assertContentType(r, "application/json", t)
 	var stdoutJson interface{}
 	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
 		t.Fatalf("%#v", err)
@@ -178,21 +275,19 @@
 	if !called {
 		t.Fatal("handler was not called")
 	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
-	}
+	assertContentType(r, "application/json", t)
 	var stdout_json struct {
 		Since int
 		Until int
 	}
 	if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
-		t.Fatalf("%#v", err)
+		t.Fatal(err)
 	}
 	if stdout_json.Since != 1 {
-		t.Fatalf("since != 1: %#v", stdout_json.Since)
+		t.Errorf("since != 1: %#v", stdout_json.Since)
 	}
 	if stdout_json.Until != 0 {
-		t.Fatalf("until != 0: %#v", stdout_json.Until)
+		t.Errorf("until != 0: %#v", stdout_json.Until)
 	}
 }
 
@@ -319,13 +414,77 @@
 	}
 }
 
+func TestGetImagesByName(t *testing.T) {
+	eng := engine.New()
+	name := "image_name"
+	var called bool
+	eng.Register("image_inspect", func(job *engine.Job) engine.Status {
+		called = true
+		if job.Args[0] != name {
+			t.Fatalf("name != '%s': %#v", name, job.Args[0])
+		}
+		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
+			t.Fatal("dirty env variable not set")
+		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
+			t.Fatal("dirty env variable set when it shouldn't")
+		}
+		v := &engine.Env{}
+		v.SetBool("dirty", true)
+		if _, err := v.WriteTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+	r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t)
+	if !called {
+		t.Fatal("handler was not called")
+	}
+	if r.HeaderMap.Get("Content-Type") != "application/json" {
+		t.Fatalf("%#v\n", r)
+	}
+	var stdoutJson interface{}
+	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
+		t.Fatalf("%#v", err)
+	}
+	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
+		t.Fatalf("%#v", stdoutJson)
+	}
+}
+
+func TestDeleteContainers(t *testing.T) {
+	eng := engine.New()
+	name := "foo"
+	var called bool
+	eng.Register("delete", func(job *engine.Job) engine.Status {
+		called = true
+		if len(job.Args) == 0 {
+			t.Fatalf("Job arguments are empty")
+		}
+		if job.Args[0] != name {
+			t.Fatalf("name != '%s': %#v", name, job.Args[0])
+		}
+		return engine.StatusOK
+	})
+	r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
+	if !called {
+		t.Fatalf("handler was not called")
+	}
+	if r.Code != http.StatusNoContent {
+		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent)
+	}
+}
+
 func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
+	return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t)
+}
+
+func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
 	r := httptest.NewRecorder()
 	req, err := http.NewRequest(method, target, body)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+	if err := ServeRequest(eng, version, r, req); err != nil {
 		t.Fatal(err)
 	}
 	return r
@@ -351,3 +510,46 @@
 	}
 	return &buf
 }
+
+func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) {
+	if recorder.HeaderMap.Get("Content-Type") != content_type {
+		t.Fatalf("%#v\n", recorder)
+	}
+}
+
+// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that
+// should die as soon as we've converted all integration tests?
+// assertHttpNotError expects the given response not to have an error.
+// Otherwise it causes the test to fail.
+func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) {
+	// Non-error http status are [200, 400)
+	if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
+		t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
+	}
+}
+
+func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env {
+	v := &engine.Env{}
+	v.SetList("RepoTags", data.RepoTags)
+	v.Set("Id", data.Id)
+	v.SetInt64("Created", data.Created)
+	v.SetInt64("Size", data.Size)
+	v.SetInt64("VirtualSize", data.VirtualSize)
+	return v
+}
+
+type getImagesJSONStruct struct {
+	RepoTags    []string
+	Id          string
+	Created     int64
+	Size        int64
+	VirtualSize int64
+}
+
+var sampleImage getImagesJSONStruct = getImagesJSONStruct{
+	RepoTags:    []string{"test-name:test-tag"},
+	Id:          "ID",
+	Created:     999,
+	Size:        777,
+	VirtualSize: 666,
+}
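
All of the new tests above share one shape: register a fake engine job, replay a request through an httptest.ResponseRecorder, and assert on status and headers. A dependency-free version of that pattern (the handler and route here are invented for illustration):

    package main

    import (
        "net/http"
        "net/http/httptest"
        "testing"
    )

    func TestJSONEndpoint(t *testing.T) {
        h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Header().Set("Content-Type", "application/json")
            w.Write([]byte(`{"dirty":true}`))
        })
        req, err := http.NewRequest("GET", "/images/json", nil)
        if err != nil {
            t.Fatal(err)
        }
        rec := httptest.NewRecorder()
        h.ServeHTTP(rec, req)
        // Non-error statuses are [200, 400), as in assertHttpNotError.
        if rec.Code < http.StatusOK || rec.Code >= http.StatusBadRequest {
            t.Fatalf("unexpected http error: %d", rec.Code)
        }
        if ct := rec.Header().Get("Content-Type"); ct != "application/json" {
            t.Fatalf("wrong content type: %q", ct)
        }
    }
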
diff --git a/archive/MAINTAINERS b/archive/MAINTAINERS
index 1e998f8..2aac726 100644
--- a/archive/MAINTAINERS
+++ b/archive/MAINTAINERS
@@ -1 +1,2 @@
-Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
diff --git a/archive/archive.go b/archive/archive.go
index 2ba62f5..7d9f7fb 100644
--- a/archive/archive.go
+++ b/archive/archive.go
@@ -16,9 +16,11 @@
 	"strings"
 	"syscall"
 
-	"github.com/dotcloud/docker/pkg/system"
-	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/utils"
 )
 
 type (
@@ -61,7 +63,7 @@
 		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
 	} {
 		if len(source) < len(m) {
-			utils.Debugf("Len too short")
+			log.Debugf("Len too short")
 			continue
 		}
 		if bytes.Compare(m, source[:len(m)]) == 0 {
@@ -83,7 +85,7 @@
 	if err != nil {
 		return nil, err
 	}
-	utils.Debugf("[tar autodetect] n: %v", bs)
+	log.Debugf("[tar autodetect] n: %v", bs)
 
 	compression := DetectCompression(bs)
 
@@ -131,7 +133,7 @@
 	return ""
 }
 
-func addTarFile(path, name string, tw *tar.Writer) error {
+func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error {
 	fi, err := os.Lstat(path)
 	if err != nil {
 		return err
@@ -177,15 +179,22 @@
 	}
 
 	if hdr.Typeflag == tar.TypeReg {
-		if file, err := os.Open(path); err != nil {
+		file, err := os.Open(path)
+		if err != nil {
 			return err
-		} else {
-			_, err := io.Copy(tw, file)
-			if err != nil {
-				return err
-			}
-			file.Close()
 		}
+
+		twBuf.Reset(tw)
+		_, err = io.Copy(twBuf, file)
+		file.Close()
+		if err != nil {
+			return err
+		}
+		err = twBuf.Flush()
+		if err != nil {
+			return err
+		}
+		twBuf.Reset(nil)
 	}
 
 	return nil
@@ -245,7 +254,7 @@
 		}
 
 	case tar.TypeXGlobalHeader:
-		utils.Debugf("PAX Global Extended Headers found and ignored")
+		log.Debugf("PAX Global Extended Headers found and ignored")
 		return nil
 
 	default:
@@ -328,10 +337,12 @@
 			options.Includes = []string{"."}
 		}
 
+		twBuf := bufio.NewWriterSize(nil, twBufSize)
+
 		for _, include := range options.Includes {
 			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
 				if err != nil {
-					utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
+					log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
 					return nil
 				}
 
@@ -340,23 +351,21 @@
 					return nil
 				}
 
-				for _, exclude := range options.Excludes {
-					matched, err := filepath.Match(exclude, relFilePath)
-					if err != nil {
-						utils.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
-						return err
-					}
-					if matched {
-						utils.Debugf("Skipping excluded path: %s", relFilePath)
-						if f.IsDir() {
-							return filepath.SkipDir
-						}
-						return nil
-					}
+				skip, err := utils.Matches(relFilePath, options.Excludes)
+				if err != nil {
+					log.Debugf("Error matching %s: %s", relFilePath, err)
+					return err
 				}
 
-				if err := addTarFile(filePath, relFilePath, tw); err != nil {
-					utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
+				if skip {
+					if f.IsDir() {
+						return filepath.SkipDir
+					}
+					return nil
+				}
+
+				if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil {
+					log.Debugf("Can't add file %s to tar: %s", srcPath, err)
 				}
 				return nil
 			})
@@ -364,13 +373,13 @@
 
 		// Make sure to check the error on Close.
 		if err := tw.Close(); err != nil {
-			utils.Debugf("Can't close tar writer: %s\n", err)
+			log.Debugf("Can't close tar writer: %s", err)
 		}
 		if err := compressWriter.Close(); err != nil {
-			utils.Debugf("Can't close compress writer: %s\n", err)
+			log.Debugf("Can't close compress writer: %s", err)
 		}
 		if err := pipeWriter.Close(); err != nil {
-			utils.Debugf("Can't close pipe writer: %s\n", err)
+			log.Debugf("Can't close pipe writer: %s", err)
 		}
 	}()
 
@@ -383,10 +392,18 @@
 //  identity (uncompressed), gzip, bzip2, xz.
 // FIXME: specify behavior when target path exists vs. doesn't exist.
 func Untar(archive io.Reader, dest string, options *TarOptions) error {
+	if options == nil {
+		options = &TarOptions{}
+	}
+
 	if archive == nil {
 		return fmt.Errorf("Empty archive")
 	}
 
+	if options.Excludes == nil {
+		options.Excludes = []string{}
+	}
+
 	decompressedArchive, err := DecompressStream(archive)
 	if err != nil {
 		return err
@@ -394,10 +411,12 @@
 	defer decompressedArchive.Close()
 
 	tr := tar.NewReader(decompressedArchive)
+	trBuf := bufio.NewReaderSize(nil, trBufSize)
 
 	var dirs []*tar.Header
 
 	// Iterate through the files in the archive.
+loop:
 	for {
 		hdr, err := tr.Next()
 		if err == io.EOF {
@@ -411,6 +430,12 @@
 		// Normalize name, for safety and for a simple is-root check
 		hdr.Name = filepath.Clean(hdr.Name)
 
+		for _, exclude := range options.Excludes {
+			if strings.HasPrefix(hdr.Name, exclude) {
+				continue loop
+			}
+		}
+
 		if !strings.HasSuffix(hdr.Name, "/") {
 			// Not the root directory, ensure that the parent directory exists
 			parent := filepath.Dir(hdr.Name)
@@ -439,7 +464,8 @@
 				}
 			}
 		}
-		if err := createTarFile(path, dest, hdr, tr, options == nil || !options.NoLchown); err != nil {
+		trBuf.Reset(tr)
+		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
 			return err
 		}
 
@@ -465,7 +491,7 @@
 // the output of one piped into the other. If either Tar or Untar fails,
 // TarUntar aborts and returns the error.
 func TarUntar(src string, dst string) error {
-	utils.Debugf("TarUntar(%s %s)", src, dst)
+	log.Debugf("TarUntar(%s %s)", src, dst)
 	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
 	if err != nil {
 		return err
@@ -502,11 +528,11 @@
 		return CopyFileWithTar(src, dst)
 	}
 	// Create dst, copy src's content into it
-	utils.Debugf("Creating dest directory: %s", dst)
+	log.Debugf("Creating dest directory: %s", dst)
 	if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
 		return err
 	}
-	utils.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	log.Debugf("Calling TarUntar(%s, %s)", src, dst)
 	return TarUntar(src, dst)
 }
 
@@ -517,7 +543,7 @@
 // If `dst` ends with a trailing slash '/', the final destination path
 // will be `dst/base(src)`.
 func CopyFileWithTar(src, dst string) (err error) {
-	utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
 	srcSt, err := os.Stat(src)
 	if err != nil {
 		return err
@@ -544,19 +570,19 @@
 		}
 		defer srcF.Close()
 
-		tw := tar.NewWriter(w)
 		hdr, err := tar.FileInfoHeader(srcSt, "")
 		if err != nil {
 			return err
 		}
 		hdr.Name = filepath.Base(dst)
+		tw := tar.NewWriter(w)
+		defer tw.Close()
 		if err := tw.WriteHeader(hdr); err != nil {
 			return err
 		}
 		if _, err := io.Copy(tw, srcF); err != nil {
 			return err
 		}
-		tw.Close()
 		return nil
 	})
 	defer func() {
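
The twBuf plumbing through addTarFile is an allocation-reuse pattern: one 32KiB bufio.Writer is created per archive walk and re-pointed at the tar writer for each regular file, rather than buffering per file. Reduced to its essentials (the paths are illustrative):

    package main

    import (
        "bufio"
        "io"
        "os"
    )

    const twBufSize = 32 * 1024

    // copyFileBuffered mirrors addTarFile's buffered copy: the one shared
    // bufio.Writer is re-pointed at dst for this file, flushed, then
    // detached so nothing holds a stale reference between files.
    func copyFileBuffered(dst io.Writer, path string, buf *bufio.Writer) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        buf.Reset(dst)
        _, err = io.Copy(buf, f)
        f.Close()
        if err != nil {
            return err
        }
        if err := buf.Flush(); err != nil {
            return err
        }
        buf.Reset(nil)
        return nil
    }

    func main() {
        buf := bufio.NewWriterSize(nil, twBufSize) // allocated once per walk
        for _, p := range []string{"/etc/hostname", "/etc/os-release"} {
            if err := copyFileBuffered(os.Stdout, p, buf); err != nil {
                println("skip:", p, err.Error())
            }
        }
    }
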
diff --git a/archive/archive_test.go b/archive/archive_test.go
index 61ee0af..b46f953 100644
--- a/archive/archive_test.go
+++ b/archive/archive_test.go
@@ -11,7 +11,7 @@
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 func TestCmdStreamLargeStderr(t *testing.T) {
@@ -109,6 +109,9 @@
 	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
 		t.Fatal(err)
 	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
 
 	for _, c := range []Compression{
 		Uncompressed,
@@ -116,13 +119,14 @@
 	} {
 		changes, err := tarUntar(t, origin, &TarOptions{
 			Compression: c,
+			Excludes:    []string{"3"},
 		})
 
 		if err != nil {
 			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
 		}
 
-		if len(changes) != 0 {
+		if len(changes) != 1 || changes[0].Path != "/3" {
 			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
 		}
 	}
@@ -199,3 +203,42 @@
 		t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
 	}
 }
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
+
+func BenchmarkTarUntar(b *testing.B) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		b.Fatal(err)
+	}
+	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+	if err != nil {
+		b.Fatal(err)
+	}
+	target := path.Join(tempDir, "dest")
+	n, err := prepareUntarSourceDirectory(100, origin)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.ResetTimer()
+	b.SetBytes(int64(n))
+	defer os.RemoveAll(origin)
+	defer os.RemoveAll(tempDir)
+	for n := 0; n < b.N; n++ {
+		err := TarUntar(origin, target)
+		if err != nil {
+			b.Fatal(err)
+		}
+		os.RemoveAll(target)
+	}
+}
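
BenchmarkTarUntar above uses the two knobs that make Go benchmarks report throughput: ResetTimer discards setup cost, and SetBytes makes `go test -bench .` print MB/s. The same shape on a trivial workload:

    package main

    import (
        "bytes"
        "io"
        "io/ioutil"
        "testing"
    )

    func BenchmarkCopy(b *testing.B) {
        src := bytes.Repeat([]byte("fooo"), 100) // setup, excluded from timing
        b.ResetTimer()
        b.SetBytes(int64(len(src))) // bytes per iteration -> MB/s column
        for n := 0; n < b.N; n++ {
            if _, err := io.Copy(ioutil.Discard, bytes.NewReader(src)); err != nil {
                b.Fatal(err)
            }
        }
    }
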
diff --git a/archive/changes.go b/archive/changes.go
index 1e588b8..a591e8a 100644
--- a/archive/changes.go
+++ b/archive/changes.go
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"io"
@@ -10,9 +11,10 @@
 	"syscall"
 	"time"
 
-	"github.com/dotcloud/docker/pkg/system"
-	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/system"
 )
 
 type ChangeType int
@@ -343,6 +345,7 @@
 	tw := tar.NewWriter(writer)
 
 	go func() {
+		twBuf := bufio.NewWriterSize(nil, twBufSize)
 		// In general we log errors here but ignore them because
 		// during e.g. a diff operation the container can continue
 		// mutating the filesystem and we can see transient errors
@@ -361,19 +364,19 @@
 					ChangeTime: timestamp,
 				}
 				if err := tw.WriteHeader(hdr); err != nil {
-					utils.Debugf("Can't write whiteout header: %s\n", err)
+					log.Debugf("Can't write whiteout header: %s", err)
 				}
 			} else {
 				path := filepath.Join(dir, change.Path)
-				if err := addTarFile(path, change.Path[1:], tw); err != nil {
-					utils.Debugf("Can't add file %s to tar: %s\n", path, err)
+				if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil {
+					log.Debugf("Can't add file %s to tar: %s", path, err)
 				}
 			}
 		}
 
 		// Make sure to check the error on Close.
 		if err := tw.Close(); err != nil {
-			utils.Debugf("Can't close layer: %s\n", err)
+			log.Debugf("Can't close layer: %s", err)
 		}
 		writer.Close()
 	}()
diff --git a/archive/common.go b/archive/common.go
new file mode 100644
index 0000000..2aac34e
--- /dev/null
+++ b/archive/common.go
@@ -0,0 +1,4 @@
+package archive
+
+const twBufSize = 32 * 1024
+const trBufSize = 32 * 1024
diff --git a/archive/diff.go b/archive/diff.go
index d169669..a805f2c 100644
--- a/archive/diff.go
+++ b/archive/diff.go
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"bufio"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -9,7 +10,7 @@
 	"strings"
 	"syscall"
 
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
@@ -32,6 +33,7 @@
 	}
 
 	tr := tar.NewReader(layer)
+	trBuf := bufio.NewReaderSize(nil, trBufSize)
 
 	var dirs []*tar.Header
 
@@ -108,7 +110,8 @@
 				}
 			}
 
-			srcData := io.Reader(tr)
+			trBuf.Reset(tr)
+			srcData := io.Reader(trBuf)
 			srcHdr := hdr
 
 			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
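
Read side of the same buffer-reuse idea: one bufio.Reader lives for the whole untar loop and is Reset at each tar entry instead of being reallocated. In isolation:

    package main

    import (
        "bufio"
        "io"
        "os"
        "strings"
    )

    const trBufSize = 32 * 1024

    func main() {
        trBuf := bufio.NewReaderSize(nil, trBufSize) // one buffer for the whole loop
        for _, entry := range []string{"first entry\n", "second entry\n"} {
            trBuf.Reset(strings.NewReader(entry)) // re-point instead of reallocating
            io.Copy(os.Stdout, trBuf)
        }
    }
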
diff --git a/archive/wrap.go b/archive/wrap.go
index 03ea508..b8b6019 100644
--- a/archive/wrap.go
+++ b/archive/wrap.go
@@ -2,7 +2,7 @@
 
 import (
 	"bytes"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 	"io/ioutil"
 )
 
diff --git a/builtins/builtins.go b/builtins/builtins.go
index 3fa0651..0aa2f43 100644
--- a/builtins/builtins.go
+++ b/builtins/builtins.go
@@ -3,14 +3,14 @@
 import (
 	"runtime"
 
-	"github.com/dotcloud/docker/api"
-	apiserver "github.com/dotcloud/docker/api/server"
-	"github.com/dotcloud/docker/daemon/networkdriver/bridge"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/server"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/api"
+	apiserver "github.com/docker/docker/api/server"
+	"github.com/docker/docker/daemon/networkdriver/bridge"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/events"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/registry"
 )
 
 func Register(eng *engine.Engine) error {
@@ -20,6 +20,9 @@
 	if err := remote(eng); err != nil {
 		return err
 	}
+	if err := events.New().Install(eng); err != nil {
+		return err
+	}
 	if err := eng.Register("version", dockerVersion); err != nil {
 		return err
 	}
@@ -50,9 +53,6 @@
 // These components should be broken off into plugins of their own.
 //
 func daemon(eng *engine.Engine) error {
-	if err := eng.Register("initserver", server.InitServer); err != nil {
-		return err
-	}
 	return eng.Register("init_networkdriver", bridge.InitDriver)
 }
 
@@ -65,7 +65,7 @@
 	v.Set("GoVersion", runtime.Version())
 	v.Set("Os", runtime.GOOS)
 	v.Set("Arch", runtime.GOARCH)
-	if kernelVersion, err := utils.GetKernelVersion(); err == nil {
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
 		v.Set("KernelVersion", kernelVersion.String())
 	}
 	if _, err := v.WriteTo(job.Stdout); err != nil {
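
For orientation, builtins wires named jobs into Docker's internal engine; stripped of everything Docker-specific, the shape is a string-to-handler registry plus Install-style helpers. A toy version (all types here are invented for illustration, not the real engine API):

    package main

    import "fmt"

    type Status int

    const StatusOK Status = 0

    type Job struct{ Name string }

    // Engine is a toy stand-in for github.com/docker/docker/engine.
    type Engine struct{ handlers map[string]func(*Job) Status }

    func New() *Engine { return &Engine{handlers: make(map[string]func(*Job) Status)} }

    func (e *Engine) Register(name string, h func(*Job) Status) error {
        if _, dup := e.handlers[name]; dup {
            return fmt.Errorf("handler already registered: %s", name)
        }
        e.handlers[name] = h
        return nil
    }

    func main() {
        eng := New()
        if err := eng.Register("version", func(j *Job) Status { return StatusOK }); err != nil {
            panic(err)
        }
        fmt.Println("registered jobs:", len(eng.handlers))
    }
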
diff --git a/contrib/check-config.sh b/contrib/check-config.sh
index fe4b9f1..cb6a4f2 100755
--- a/contrib/check-config.sh
+++ b/contrib/check-config.sh
@@ -113,6 +113,23 @@
 	echo "    $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
 fi
 
+if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
+	echo -n '- '
+	if command -v apparmor_parser &> /dev/null; then
+		echo "$(wrap_good 'apparmor' 'enabled and tools installed')"
+	else
+		echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')"
+		echo -n '    '
+		if command -v apt-get &> /dev/null; then
+			echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')"
+		elif command -v yum &> /dev/null; then
+			echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')"
+		else
+			echo "$(wrap_color '(look for an "apparmor" package for your distribution)')"
+		fi
+	fi
+fi
+
 flags=(
 	NAMESPACES {NET,PID,IPC,UTS}_NS
 	DEVPTS_MULTIPLE_INSTANCES
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index 8939556..d6d622f 100755
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -156,7 +156,7 @@
 		*)
 			local counter="$(__docker_pos_first_nonflag '-t|--tag')"
 			if [ $cword -eq $counter ]; then
-				_filedir
+				_filedir -d
 			fi
 			;;
 	esac
@@ -485,21 +485,52 @@
 _docker_run()
 {
 	case "$prev" in
-		--cidfile)
+		-a|--attach)
+			COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) )
+			return
+			;;
+		--cidfile|--env-file)
 			_filedir
+			return
 			;;
 		--volumes-from)
 			__docker_containers_all
+			return
 			;;
 		-v|--volume)
-			# TODO something magical with colons and _filedir ?
+			case "$cur" in
+				*:*)
+					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
+					;;
+				'')
+					COMPREPLY=( $( compgen -W '/' -- "$cur" ) )
+					compopt -o nospace
+					;;
+				/*)
+					_filedir
+					compopt -o nospace
+					;;
+			esac
 			return
 			;;
 		-e|--env)
 			COMPREPLY=( $( compgen -e -- "$cur" ) )
+			compopt -o nospace
 			return
 			;;
-		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf)
+		--link)
+			case "$cur" in
+				*:*)
+					;;
+				*)
+					__docker_containers_running
+					COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
+					compopt -o nospace
+					;;
+			esac
+			return
+			;;
+		--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-p|--publish|--expose|--dns|--lxc-conf)
 			return
 			;;
 		*)
diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish
index a4a9365..ba83526 100644
--- a/contrib/completion/fish/docker.fish
+++ b/contrib/completion/fish/docker.fish
@@ -85,7 +85,7 @@
 complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
 
 # cp
-complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from a container's filesystem to the host path'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path"
 
 # diff
 complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index 3f96f00..faf17b2 100644
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -1,58 +1,118 @@
-#compdef docker 
+#compdef docker
 #
 # zsh completion for docker (http://docker.com)
 #
-# version:  0.2.2
-# author:   Felix Riedel
-# license:  BSD License
+# version:  0.3.0
 # github:   https://github.com/felixr/docker-zsh-completion
 #
+# contributors:
+#   - Felix Riedel
+#   - Vincent Bernat
+#
+# license:
+#
+# Copyright (c) 2013, Felix Riedel
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of the <organization> nor the
+#       names of its contributors may be used to endorse or promote products
+#       derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
 
 __parse_docker_list() {
-    sed -e '/^ID/d' -e 's/[ ]\{2,\}/|/g' -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/' | awk ' BEGIN {FS="|"} { printf("%s:%7s, %s\n", $1, $4, $2)}'
+        awk '
+NR == 1 {
+    idx=1;i=0;f[i]=0
+    header=$0
+    while ( match(header, /  ([A-Z]+|[A-Z]+ [A-Z]+)/) ) {
+        idx += RSTART+1
+        f[++i]=idx
+        header = substr($0,idx)
+    }
+    f[++i]=999
+}
+
+NR > 1 '"$1"' {
+    for(j=0;j<i;j++) {
+        x[j] = substr($0, f[j], f[j+1]-f[j]-1)
+        gsub(/[ ]+$/, "", x[j])
+    }
+    printf("%s:%7s, %s\n", x[0], x[3], x[1])
+    if (x[6] != "") {
+       split(x[6], names, /,/)
+       for (name in names) printf("%s:%7s, %s\n", names[name], x[3], x[1])
+    }
+}
+'| sed -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/'
 }
 
 __docker_stoppedcontainers() {
     local expl
-    declare -a stoppedcontainers 
-    stoppedcontainers=(${(f)"$(docker ps -a | grep --color=never 'Exit' |  __parse_docker_list )"})
-    _describe -t containers-stopped "Stopped Containers" stoppedcontainers 
+    declare -a stoppedcontainers
+    stoppedcontainers=(${(f)"$(_call_program commands docker ps -a |  __parse_docker_list '&& / Exit/')"})
+    _describe -t containers-stopped "Stopped Containers" stoppedcontainers "$@"
 }
 
 __docker_runningcontainers() {
     local expl
-    declare -a containers 
+    declare -a containers
 
-    containers=(${(f)"$(docker ps | __parse_docker_list)"})
-    _describe -t containers-active "Running Containers" containers 
+    containers=(${(f)"$(_call_program commands docker ps | __parse_docker_list)"})
+    _describe -t containers-active "Running Containers" containers "$@"
 }
 
 __docker_containers () {
-    __docker_stoppedcontainers 
-    __docker_runningcontainers
+    __docker_stoppedcontainers "$@"
+    __docker_runningcontainers "$@"
 }
 
 __docker_images () {
     local expl
     declare -a images
-    images=(${(f)"$(docker images | awk '(NR > 1){printf("%s\\:%s\n", $1,$2)}')"})
-    images=($images ${(f)"$(docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"})
+    images=(${(f)"$(_call_program commands docker images | awk '(NR > 1 && $1 != "<none>"){printf("%s", $1);if ($2 != "<none>") printf("\\:%s", $2); printf("\n")}')"})
+    images=($images ${(f)"$(_call_program commands docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"})
     _describe -t docker-images "Images" images
 }
 
 __docker_tags() {
     local expl
     declare -a tags
-    tags=(${(f)"$(docker images | awk '(NR>1){print $2}'| sort | uniq)"})
+    tags=(${(f)"$(_call_program commands docker images | awk '(NR>1){print $2}'| sort | uniq)"})
     _describe -t docker-tags "tags" tags
 }
 
+__docker_repositories_with_tags() {
+    if compset -P '*:'; then
+        __docker_tags
+    else
+        __docker_repositories -qS ":"
+    fi
+}
+
 __docker_search() {
     # declare -a dockersearch
     local cache_policy
     zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
     if [[ -z "$cache_policy" ]]; then
-        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy 
+        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
     fi
 
     local searchterm cachename
@@ -60,14 +120,14 @@
     cachename=_docker-search-$searchterm
 
     local expl
-    local -a result 
+    local -a result
     if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \
         && ! _retrieve_cache ${cachename#_}; then
         _message "Searching for ${searchterm}..."
-        result=(${(f)"$(docker search ${searchterm} | awk '(NR>2){print $1}')"})
+        result=(${(f)"$(_call_program commands docker search ${searchterm} | awk '(NR>2){print $1}')"})
         _store_cache ${cachename#_} result
-    fi 
-    _wanted dockersearch expl 'Available images' compadd -a result 
+    fi
+    _wanted dockersearch expl 'Available images' compadd -a result
 }
 
 __docker_caching_policy()
@@ -81,8 +141,8 @@
 __docker_repositories () {
     local expl
     declare -a repos
-    repos=(${(f)"$(docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"})
-    _describe -t docker-repos "Repositories" repos
+    repos=(${(f)"$(_call_program commands docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"})
+    _describe -t docker-repos "Repositories" repos "$@"
 }
 
 __docker_commands () {
@@ -91,15 +151,15 @@
 
     zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
     if [[ -z "$cache_policy" ]]; then
-        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy 
+        zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy
     fi
 
     if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \
-        && ! _retrieve_cache docker_subcommands; 
+        && ! _retrieve_cache docker_subcommands;
     then
-        _docker_subcommands=(${${(f)"$(_call_program commands 
+        _docker_subcommands=(${${(f)"$(_call_program commands
         docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}})
-        _docker_subcommands=($_docker_subcommands 'help:Show help for a command') 
+        _docker_subcommands=($_docker_subcommands 'help:Show help for a command')
         _store_cache docker_subcommands _docker_subcommands
     fi
     _describe -t docker-commands "docker command" _docker_subcommands
@@ -108,100 +168,206 @@
 __docker_subcommand () {
     local -a _command_args
     case "$words[1]" in
-        (attach|wait)
-            _arguments ':containers:__docker_runningcontainers'
+        (attach)
+            _arguments \
+                '--no-stdin[Do not attach stdin]' \
+                '--sig-proxy[Proxy all received signals]' \
+                ':containers:__docker_runningcontainers'
             ;;
         (build)
             _arguments \
-                '-t=-:repository:__docker_repositories' \
+                '--no-cache[Do not use cache when building the image]' \
+                '-q[Suppress verbose build output]' \
+                '--rm[Remove intermediate containers after a successful build]' \
+                '-t=-:repository:__docker_repositories_with_tags' \
                 ':path or URL:_directories'
             ;;
         (commit)
             _arguments \
+                '--author=-[Author]:author: ' \
+                '-m=-[Commit message]:message: ' \
+                '--run=-[Configuration automatically applied when the image is run]:configuration: ' \
                 ':container:__docker_containers' \
-                ':repository:__docker_repositories' \
-                ':tag: '
+                ':repository:__docker_repositories_with_tags'
             ;;
-        (diff|export|logs)
+        (cp)
+            _arguments \
+                ':container:->container' \
+                ':hostpath:_files'
+            case $state in
+                (container)
+                    if compset -P '*:'; then
+                        _files
+                    else
+                        __docker_containers -qS ":"
+                    fi
+                    ;;
+            esac
+            ;;
+        (diff|export)
             _arguments '*:containers:__docker_containers'
             ;;
         (history)
-            _arguments '*:images:__docker_images'
+            _arguments \
+                '--no-trunc[Do not truncate output]' \
+                '-q[Only show numeric IDs]' \
+                '*:images:__docker_images'
             ;;
         (images)
             _arguments \
                 '-a[Show all images]' \
+                '--no-trunc[Do not truncate output]' \
+                '-q[Only show numeric IDs]' \
+                '--tree[Output graph in tree format]' \
+                '--viz[Output graph in graphviz format]' \
                 ':repository:__docker_repositories'
             ;;
         (inspect)
-            _arguments '*:containers:__docker_containers'
+            _arguments \
+                '--format=-[Format the output using the given go template]:template: ' \
+                '*:containers:__docker_containers'
             ;;
-        (history)
-            _arguments ':images:__docker_images'
+        (import)
+            _arguments \
+                ':URL:(- http:// file://)' \
+                ':repository:__docker_repositories_with_tags'
+            ;;
+        (info)
+            ;;
+        (insert)
+            _arguments '1:images:__docker_images' \
+                       '2:URL:(http:// file://)' \
+                       '3:file:_files'
             ;;
         (kill)
             _arguments '*:containers:__docker_runningcontainers'
             ;;
+        (load)
+            ;;
+        (login)
+            _arguments \
+                '-e=-[Email]:email: ' \
+                '-p=-[Password]:password: ' \
+                '-u=-[Username]:username: ' \
+                ':server: '
+            ;;
+        (logs)
+            _arguments \
+                '-f[Follow log output]' \
+                '*:containers:__docker_containers'
+            ;;
         (port)
-            _arguments '1:containers:__docker_runningcontainers'
+            _arguments \
+                '1:containers:__docker_runningcontainers' \
+                '2:port:_ports'
             ;;
         (start)
-            _arguments '*:containers:__docker_stoppedcontainers'
+            _arguments \
+                '-a[Attach container'"'"'s stdout/stderr and forward all signals]' \
+                '-i[Attach container'"'"'s stdin]' \
+                '*:containers:__docker_stoppedcontainers'
             ;;
         (rm)
-            _arguments '-v[Remove the volumes associated to the container]' \
+            _arguments \
+                '--link[Remove the specified link and not the underlying container]' \
+                '-v[Remove the volumes associated to the container]' \
                 '*:containers:__docker_stoppedcontainers'
             ;;
         (rmi)
-            _arguments '-v[Remove the volumes associated to the container]' \
+            _arguments \
                 '*:images:__docker_images'
             ;;
-        (top)
-            _arguments '1:containers:__docker_runningcontainers'
-            ;;
         (restart|stop)
             _arguments '-t=-[Number of seconds to try to stop for before killing the container]:seconds before killing:(1 5 10 30 60)' \
                 '*:containers:__docker_runningcontainers'
             ;;
         (top)
-            _arguments ':containers:__docker_runningcontainers'
+            _arguments \
+                '1:containers:__docker_runningcontainers' \
+                '(-)*:: :->ps-arguments'
+            case $state in
+                (ps-arguments)
+                    _ps
+                    ;;
+            esac
+
             ;;
         (ps)
-            _arguments '-a[Show all containers. Only running containers are shown by default]' \
-                '-h[Show help]' \
-                '--before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
-            '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)'
+            _arguments \
+                '-a[Show all containers]' \
+                '--before=-[Show only container created before...]:containers:__docker_containers' \
+                '-l[Show only the latest created container]' \
+                '-n=-[Show n last created containers, include non-running ones]:n:(1 5 10 25 50)' \
+                '--no-trunc[Do not truncate output]' \
+                '-q[Only show numeric IDs]' \
+                '-s[Display sizes]' \
+                '--since=-[Show only containers created since...]:containers:__docker_containers'
             ;;
         (tag)
             _arguments \
                 '-f[force]'\
                 ':image:__docker_images'\
-                ':repository:__docker_repositories' \
-                ':tag:__docker_tags'
+                ':repository:__docker_repositories_with_tags'
             ;;
         (run)
             _arguments \
-                '-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \
-                '-c=-[CPU shares (relative weight)]:CPU shares: ' \
+                '-P[Publish all exposed ports to the host]' \
+                '-a[Attach to stdin, stdout or stderr]' \
+                '-c=-[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \
+                '--cidfile=-[Write the container ID to the file]:CID file:_files' \
                 '-d[Detached mode: leave the container running in the background]' \
-                '*--dns=[Set custom dns servers]:dns server: ' \
-                '*-e=[Set environment variables]:environment variable: ' \
+                '*--dns=-[Set custom dns servers]:dns server: ' \
+                '*-e=-[Set environment variables]:environment variable: ' \
                 '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \
+                '*--expose=-[Expose a port from the container without publishing it]: ' \
                 '-h=-[Container host name]:hostname:_hosts' \
                 '-i[Keep stdin open even if not attached]' \
+                '--link=-[Add link to another container]:link:->link' \
+                '--lxc-conf=-[Add custom lxc options]:lxc options: ' \
                 '-m=-[Memory limit (in bytes)]:limit: ' \
-                '*-p=-[Expose a container''s port to the host]:port:_ports' \
-                '-t=-[Allocate a pseudo-tty]:toggle:(true false)' \
+                '--name=-[Container name]:name: ' \
+                '*-p=-[Expose a container'"'"'s port to the host]:port:_ports' \
+                '--privileged[Give extended privileges to this container]' \
+                '--rm[Automatically remove the container when it exits]' \
+                '--sig-proxy[Proxy all received signals]' \
+                '-t[Allocate a pseudo-tty]' \
                 '-u=-[Username or UID]:user:_users' \
                 '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\
                 '--volumes-from=-[Mount volumes from the specified container]:volume: ' \
+                '-w=-[Working directory inside the container]:directory:_directories' \
                 '(-):images:__docker_images' \
                 '(-):command: _command_names -e' \
                 '*::arguments: _normal'
-                ;;
+
+            case $state in
+                (link)
+                    if compset -P '*:'; then
+                        _wanted alias expl 'Alias' compadd -E ""
+                    else
+                        __docker_runningcontainers -qS ":"
+                    fi
+                    ;;
+            esac
+
+            ;;
         (pull|search)
             _arguments ':name:__docker_search'
             ;;
+        (push)
+            _arguments ':repository:__docker_repositories_with_tags'
+            ;;
+        (save)
+            _arguments \
+                ':images:__docker_images'
+            ;;
+        (wait)
+            _arguments ':containers:__docker_runningcontainers'
+            ;;
         (help)
             _arguments ':subcommand:__docker_commands'
             ;;
@@ -212,24 +378,31 @@
 }
 
 _docker () {
+    # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`.
+    # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`.
+    if [[ $service != docker ]]; then
+        _call_function - _$service
+        return
+    fi
+
     local curcontext="$curcontext" state line
     typeset -A opt_args
 
     _arguments -C \
       '-H=-[tcp://host:port to bind/connect to]:socket: ' \
          '(-): :->command' \
-         '(-)*:: :->option-or-argument' 
+         '(-)*:: :->option-or-argument'
 
     if (( CURRENT == 1 )); then
 
     fi
-    case $state in 
+    case $state in
         (command)
             __docker_commands
             ;;
         (option-or-argument)
             curcontext=${curcontext%:*:*}:docker-$words[1]:
-            __docker_subcommand 
+            __docker_subcommand
             ;;
     esac
 }
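The subservice dispatch added at the top of _docker makes the same completion
reusable for wrapper commands: when $service is not `docker`, the function
simply hands off to `_$service`. The registration it enables is the one named
in the comment above:

    # Complete docker-shell's arguments as docker containers:
    compdef _docker docker-shell=_docker_containers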
diff --git a/contrib/crashTest.go b/contrib/crashTest.go
deleted file mode 100644
index 6da89bf..0000000
--- a/contrib/crashTest.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"io"
-	"log"
-	"net"
-	"os"
-	"os/exec"
-	"path"
-	"time"
-)
-
-var DOCKERPATH = path.Join(os.Getenv("DOCKERPATH"), "docker")
-
-// WARNING: this crashTest will 1) crash your host, 2) remove all containers
-func runDaemon() (*exec.Cmd, error) {
-	os.Remove("/var/run/docker.pid")
-	exec.Command("rm", "-rf", "/var/lib/docker/containers").Run()
-	cmd := exec.Command(DOCKERPATH, "-d")
-	outPipe, err := cmd.StdoutPipe()
-	if err != nil {
-		return nil, err
-	}
-	errPipe, err := cmd.StderrPipe()
-	if err != nil {
-		return nil, err
-	}
-	if err := cmd.Start(); err != nil {
-		return nil, err
-	}
-	go func() {
-		io.Copy(os.Stdout, outPipe)
-	}()
-	go func() {
-		io.Copy(os.Stderr, errPipe)
-	}()
-	return cmd, nil
-}
-
-func crashTest() error {
-	if err := exec.Command("/bin/bash", "-c", "while true; do true; done").Start(); err != nil {
-		return err
-	}
-
-	var endpoint string
-	if ep := os.Getenv("TEST_ENDPOINT"); ep == "" {
-		endpoint = "192.168.56.1:7979"
-	} else {
-		endpoint = ep
-	}
-
-	c := make(chan bool)
-	var conn io.Writer
-
-	go func() {
-		conn, _ = net.Dial("tcp", endpoint)
-		c <- false
-	}()
-	go func() {
-		time.Sleep(2 * time.Second)
-		c <- true
-	}()
-	<-c
-
-	restartCount := 0
-	totalTestCount := 1
-	for {
-		daemon, err := runDaemon()
-		if err != nil {
-			return err
-		}
-		restartCount++
-		//		time.Sleep(5000 * time.Millisecond)
-		var stop bool
-		go func() error {
-			stop = false
-			for i := 0; i < 100 && !stop; {
-				func() error {
-					cmd := exec.Command(DOCKERPATH, "run", "ubuntu", "echo", fmt.Sprintf("%d", totalTestCount))
-					i++
-					totalTestCount++
-					outPipe, err := cmd.StdoutPipe()
-					if err != nil {
-						return err
-					}
-					inPipe, err := cmd.StdinPipe()
-					if err != nil {
-						return err
-					}
-					if err := cmd.Start(); err != nil {
-						return err
-					}
-					if conn != nil {
-						go io.Copy(conn, outPipe)
-					}
-
-					// Expecting error, do not check
-					inPipe.Write([]byte("hello world!!!!!\n"))
-					go inPipe.Write([]byte("hello world!!!!!\n"))
-					go inPipe.Write([]byte("hello world!!!!!\n"))
-					inPipe.Close()
-
-					if err := cmd.Wait(); err != nil {
-						return err
-					}
-					outPipe.Close()
-					return nil
-				}()
-			}
-			return nil
-		}()
-		time.Sleep(20 * time.Second)
-		stop = true
-		if err := daemon.Process.Kill(); err != nil {
-			return err
-		}
-	}
-}
-
-func main() {
-	if err := crashTest(); err != nil {
-		log.Println(err)
-	}
-}
diff --git a/contrib/desktop-integration/iceweasel/Dockerfile b/contrib/desktop-integration/iceweasel/Dockerfile
index 80d6a55..0f3e8f2 100644
--- a/contrib/desktop-integration/iceweasel/Dockerfile
+++ b/contrib/desktop-integration/iceweasel/Dockerfile
@@ -29,7 +29,7 @@
 MAINTAINER Daniel Mizyrycki <daniel@docker.com>
 
 # Install Iceweasel and "sudo"
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq iceweasel sudo
+RUN apt-get update && apt-get install -y iceweasel sudo
 
 # create sysadmin account
 RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin
diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go
index a9327f9..23d19f0 100644
--- a/contrib/docker-device-tool/device_tool.go
+++ b/contrib/docker-device-tool/device_tool.go
@@ -3,7 +3,7 @@
 import (
 	"flag"
 	"fmt"
-	"github.com/dotcloud/docker/daemon/graphdriver/devmapper"
+	"github.com/docker/docker/daemon/graphdriver/devmapper"
 	"os"
 	"path"
 	"sort"
diff --git a/contrib/host-integration/Dockerfile.dev b/contrib/host-integration/Dockerfile.dev
index 8002165..1c0fbd8 100644
--- a/contrib/host-integration/Dockerfile.dev
+++ b/contrib/host-integration/Dockerfile.dev
@@ -19,7 +19,7 @@
 ENV		GOPATH	  /go
 ENV		PATH	  $GOROOT/bin:$PATH
 
-RUN		go get github.com/dotcloud/docker && cd /go/src/github.com/dotcloud/docker && git checkout v0.6.3
+RUN		go get github.com/docker/docker && cd /go/src/github.com/docker/docker && git checkout v0.6.3
 ADD		manager.go	/manager/
 RUN		cd /manager && go build -o /usr/bin/manager
 
diff --git a/contrib/host-integration/manager.go b/contrib/host-integration/manager.go
index 2798a5d..c0b488b 100644
--- a/contrib/host-integration/manager.go
+++ b/contrib/host-integration/manager.go
@@ -5,7 +5,7 @@
 	"encoding/json"
 	"flag"
 	"fmt"
-	"github.com/dotcloud/docker"
+	"github.com/docker/docker"
 	"os"
 	"strings"
 	"text/template"
diff --git a/contrib/host-integration/manager.sh b/contrib/host-integration/manager.sh
index fecf4bf..8ea296f 100755
--- a/contrib/host-integration/manager.sh
+++ b/contrib/host-integration/manager.sh
@@ -37,7 +37,7 @@
 	exit 1
 fi
 
-# TODO https://github.com/dotcloud/docker/issues/734 (docker inspect formatting)
+# TODO https://github.com/docker/docker/issues/734 (docker inspect formatting)
 #if command -v docker > /dev/null 2>&1; then
 #	image="$(docker inspect -f '{{.Image}}' "$cid")"
 #	if [ "$image" ]; then
diff --git a/contrib/init/systemd/MAINTAINERS b/contrib/init/systemd/MAINTAINERS
new file mode 100644
index 0000000..760a76d
--- /dev/null
+++ b/contrib/init/systemd/MAINTAINERS
@@ -0,0 +1,2 @@
+Lokesh Mandvekar <lsm5@fedoraproject.org> (@lsm5)
+Brandon Philips <brandon.philips@coreos.com> (@philips)
diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service
index 6f3cc33..0cb31e3 100644
--- a/contrib/init/systemd/docker.service
+++ b/contrib/init/systemd/docker.service
@@ -1,13 +1,13 @@
 [Unit]
 Description=Docker Application Container Engine
 Documentation=http://docs.docker.com
-After=network.target
+After=network.target docker.socket
+Requires=docker.socket
 
 [Service]
-ExecStart=/usr/bin/docker -d
-Restart=on-failure
+ExecStart=/usr/bin/docker -d -H fd://
 LimitNOFILE=1048576
 LimitNPROC=1048576
 
 [Install]
-WantedBy=multi-user.target
+Also=docker.socket
diff --git a/contrib/init/systemd/socket-activation/docker.socket b/contrib/init/systemd/docker.socket
similarity index 70%
rename from contrib/init/systemd/socket-activation/docker.socket
rename to contrib/init/systemd/docker.socket
index 3635c89..9db5049 100644
--- a/contrib/init/systemd/socket-activation/docker.socket
+++ b/contrib/init/systemd/docker.socket
@@ -3,6 +3,9 @@
 
 [Socket]
 ListenStream=/var/run/docker.sock
+SocketMode=0660
+SocketUser=root
+SocketGroup=docker
 
 [Install]
 WantedBy=sockets.target
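With the socket unit above owning /var/run/docker.sock and the service started
as `docker -d -H fd://`, the daemon becomes socket-activated. A plausible way
to wire it up on a systemd host (plain systemctl usage, not part of this
change):

    systemctl enable docker.service   # Also=docker.socket enables the socket unit too
    systemctl start docker.socket     # daemon launches on first connection, via fd://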
diff --git a/contrib/init/systemd/socket-activation/docker.service b/contrib/init/systemd/socket-activation/docker.service
deleted file mode 100644
index 4af7137..0000000
--- a/contrib/init/systemd/socket-activation/docker.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Docker Application Container Engine
-Documentation=http://docs.docker.com
-After=network.target
-
-[Service]
-ExecStart=/usr/bin/docker -d -H fd://
-Restart=on-failure
-LimitNOFILE=1048576
-LimitNPROC=1048576
-
-[Install]
-WantedBy=multi-user.target
diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker
index d79d9c6..cf33c83 100755
--- a/contrib/init/sysvinit-debian/docker
+++ b/contrib/init/sysvinit-debian/docker
@@ -1,4 +1,5 @@
 #!/bin/sh
+set -e
 
 ### BEGIN INIT INFO
 # Provides:           docker
@@ -130,7 +131,7 @@
 		;;
 
 	status)
-		status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" docker
+		status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC"
 		;;
 
 	*)
@@ -138,5 +139,3 @@
 		exit 1
 		;;
 esac
-
-exit 0
diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker
index aa94c04..0c98509 100755
--- a/contrib/init/sysvinit-redhat/docker
+++ b/contrib/init/sysvinit-redhat/docker
@@ -50,7 +50,7 @@
         pid=$!
         touch $lockfile
         # wait up to 10 seconds for the pidfile to exist.  see
-        # https://github.com/dotcloud/docker/issues/5359
+        # https://github.com/docker/docker/issues/5359
         tries=0
         while [ ! -f $pidfile -a $tries -lt 10 ]; do
             sleep 1
diff --git a/contrib/mkimage-alpine.sh b/contrib/mkimage-alpine.sh
index 0bf328e..b9869ae 100755
--- a/contrib/mkimage-alpine.sh
+++ b/contrib/mkimage-alpine.sh
@@ -19,12 +19,12 @@
 }
 
 apkv() {
-	curl -s $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
+	curl -sSL $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
 		grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
 }
 
 getapk() {
-	curl -s $REPO/$ARCH/apk-tools-static-$(apkv).apk |
+	curl -sSL $REPO/$ARCH/apk-tools-static-$(apkv).apk |
 		tar -xz -C $TMP sbin/apk.static
 }
 
diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh
index 1f52cbc..e83b2b6 100755
--- a/contrib/mkimage-arch.sh
+++ b/contrib/mkimage-arch.sh
@@ -5,8 +5,13 @@
 set -e
 
 hash pacstrap &>/dev/null || {
-    echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
-    exit 1
+	echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
+	exit 1
+}
+
+hash expect &>/dev/null || {
+	echo "Could not find expect. Run pacman -S expect"
+	exit 1
 }
 
 ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX)
@@ -15,7 +20,21 @@
 # packages to ignore for space savings
 PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs
 
-pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
+expect <<EOF
+	set send_slow {1 .1}
+	proc send {ignore arg} {
+		sleep .1
+		exp_send -s -- \$arg
+	}
+	set timeout 60
+
+	spawn pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
+	expect {
+		-exact "anyway? \[Y/n\] " { send -- "n\r"; exp_continue }
+		-exact "(default=all): " { send -- "\r"; exp_continue }
+		-exact "installation? \[Y/n\]" { send -- "y\r"; exp_continue }
+	}
+EOF
 
 arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
 arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh
index 0a3df14..d9d6aae 100755
--- a/contrib/mkimage-debootstrap.sh
+++ b/contrib/mkimage-debootstrap.sh
@@ -144,7 +144,7 @@
 	#  initctl (for some pesky upstart scripts)
 	sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
 	sudo ln -sf /bin/true sbin/initctl
-	# see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173
+	# see https://github.com/docker/docker/issues/446#issuecomment-16953173
 	
 	# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
 	sudo chroot . apt-get clean
diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap
index 96d22dd..fcda497 100755
--- a/contrib/mkimage/debootstrap
+++ b/contrib/mkimage/debootstrap
@@ -83,7 +83,7 @@
 		Dir::Cache::srcpkgcache "";
 
 		# Note that we do realize this isn't the ideal way to do this, and are always
-		# open to better suggestions (https://github.com/dotcloud/docker/issues).
+		# open to better suggestions (https://github.com/docker/docker/issues).
 	EOF
 
 	# remove apt-cache translations for fast "apt-get update"
@@ -95,6 +95,21 @@
 
 	Acquire::Languages "none";
 	EOF
+
+	echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'"
+	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF'
+	# Since Docker users using "RUN apt-get update && apt-get install -y ..." in
+	# their Dockerfiles don't go delete the lists files afterwards, we want them to
+	# be as small as possible on-disk, so we explicitly request "gz" versions and
+	# tell Apt to keep them gzipped on-disk.
+
+	# For comparison, an "apt-get update" layer without this on a pristine
+	# "debian:wheezy" base image was "29.88 MB", where with this it was only
+	# "8.273 MB".
+
+	Acquire::GzipIndexes "true";
+	Acquire::CompressionTypes::Order:: "gz";
+	EOF
 fi
 
 if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
@@ -123,9 +138,9 @@
 					" "$rootfsDir/etc/apt/sources.list"
 					echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
 					# LTS
-					if [ "$suite" = 'squeeze' ]; then
+					if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then
 						head -1 "$rootfsDir/etc/apt/sources.list" \
-							| sed "s/ $suite / ${suite}-lts /" \
+							| sed "s/ $suite / squeeze-lts /" \
 								>> "$rootfsDir/etc/apt/sources.list"
 					fi
 				)
@@ -173,4 +188,6 @@
 	# delete all the apt list files since they're big and get stale quickly
 	rm -rf "$rootfsDir/var/lib/apt/lists"/*
 	# this forces "apt-get update" in dependent images, which is also good
+	
+	mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing."
 )
diff --git a/contrib/nuke-graph-directory.sh b/contrib/nuke-graph-directory.sh
new file mode 100755
index 0000000..f44c45a
--- /dev/null
+++ b/contrib/nuke-graph-directory.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+set -e
+
+dir="$1"
+
+if [ -z "$dir" ]; then
+	{
+		echo 'This script is for destroying old /var/lib/docker directories more safely than'
+		echo '  "rm -rf", which can cause data loss or other serious issues.'
+		echo
+		echo "usage: $0 directory"
+		echo "   ie: $0 /var/lib/docker"
+	} >&2
+	exit 1
+fi
+
+if [ "$(id -u)" != 0 ]; then
+	echo >&2 "error: $0 must be run as root"
+	exit 1
+fi
+
+if [ ! -d "$dir" ]; then
+	echo >&2 "error: $dir is not a directory"
+	exit 1
+fi
+
+dir="$(readlink -f "$dir")"
+
+echo
+echo "Nuking $dir ..."
+echo '  (if this is wrong, press Ctrl+C NOW!)'
+echo
+
+( set -x; sleep 10 )
+echo
+
+dir_in_dir() {
+	inner="$1"
+	outer="$2"
+	[ "${inner#$outer}" != "$inner" ]
+}
+
+# let's start by unmounting any submounts in $dir
+#   (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!)
+for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
+	mount="$(readlink -f "$mount" || true)"
+	if dir_in_dir "$mount" "$dir"; then
+		( set -x; umount -f "$mount" )
+	fi
+done
+
+# now, let's go destroy individual btrfs subvolumes, if any exist
+if command -v btrfs &> /dev/null; then
+	root="$(df "$dir" | awk 'NR>1 { print $NF }')"
+	for subvol in $(btrfs subvolume list -o "$root" 2>/dev/null | awk -F' path ' '{ print $2 }'); do
+		subvolDir="$root/$subvol"
+		if dir_in_dir "$subvolDir" "$dir"; then
+			( set -x; btrfs subvolume delete "$subvolDir" )
+		fi
+	done
+fi
+
+# finally, DESTROY ALL THINGS
+( set -x; rm -rf "$dir" )
diff --git a/contrib/prepare-commit-msg.hook b/contrib/prepare-commit-msg.hook
deleted file mode 100644
index b0fe0bf..0000000
--- a/contrib/prepare-commit-msg.hook
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-#       Auto sign all commits to allow them to be used by the Docker project.
-#       see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work
-#
-GH_USER=$(git config --get github.user)
-SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p")
-grep -qs "^$SOB" "$1" || { 
-	echo 
-	echo "$SOB" 
-} >> "$1"
diff --git a/daemon/MAINTAINERS b/daemon/MAINTAINERS
new file mode 100644
index 0000000..434aad9
--- /dev/null
+++ b/daemon/MAINTAINERS
@@ -0,0 +1,6 @@
+Solomon Hykes <solomon@docker.com> (@shykes)
+Victor Vieux <vieux@docker.com> (@vieux)
+Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
+volumes.go: Brian Goff <cpuguy83@gmail.com> (@cpuguy83)
diff --git a/daemon/attach.go b/daemon/attach.go
index 0e3b8b8..b1b06e2 100644
--- a/daemon/attach.go
+++ b/daemon/attach.go
@@ -1,11 +1,124 @@
 package daemon
 
 import (
+	"encoding/json"
+	"fmt"
 	"io"
+	"os"
+	"time"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/utils"
 )
 
+func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+	}
+
+	var (
+		name   = job.Args[0]
+		logs   = job.GetenvBool("logs")
+		stream = job.GetenvBool("stream")
+		stdin  = job.GetenvBool("stdin")
+		stdout = job.GetenvBool("stdout")
+		stderr = job.GetenvBool("stderr")
+	)
+
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+
+	//logs
+	if logs {
+		cLog, err := container.ReadLog("json")
+		if err != nil && os.IsNotExist(err) {
+			// Legacy logs
+			log.Debugf("Old logs format")
+			if stdout {
+				cLog, err := container.ReadLog("stdout")
+				if err != nil {
+					log.Errorf("Error reading logs (stdout): %s", err)
+				} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
+					log.Errorf("Error streaming logs (stdout): %s", err)
+				}
+			}
+			if stderr {
+				cLog, err := container.ReadLog("stderr")
+				if err != nil {
+					log.Errorf("Error reading logs (stderr): %s", err)
+				} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
+					log.Errorf("Error streaming logs (stderr): %s", err)
+				}
+			}
+		} else if err != nil {
+			log.Errorf("Error reading logs (json): %s", err)
+		} else {
+			dec := json.NewDecoder(cLog)
+			for {
+				l := &jsonlog.JSONLog{}
+
+				if err := dec.Decode(l); err == io.EOF {
+					break
+				} else if err != nil {
+					log.Errorf("Error streaming logs: %s", err)
+					break
+				}
+				if l.Stream == "stdout" && stdout {
+					fmt.Fprintf(job.Stdout, "%s", l.Log)
+				}
+				if l.Stream == "stderr" && stderr {
+					fmt.Fprintf(job.Stderr, "%s", l.Log)
+				}
+			}
+		}
+	}
+
+	//stream
+	if stream {
+		var (
+			cStdin           io.ReadCloser
+			cStdout, cStderr io.Writer
+			cStdinCloser     io.Closer
+		)
+
+		if stdin {
+			r, w := io.Pipe()
+			go func() {
+				defer w.Close()
+				defer log.Debugf("Closing buffered stdin pipe")
+				io.Copy(w, job.Stdin)
+			}()
+			cStdin = r
+			cStdinCloser = job.Stdin
+		}
+		if stdout {
+			cStdout = job.Stdout
+		}
+		if stderr {
+			cStderr = job.Stderr
+		}
+
+		<-daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
+
+		// If we are in stdinonce mode, wait for the process to end
+		// otherwise, simply return
+		if container.Config.StdinOnce && !container.Config.Tty {
+			container.State.WaitStop(-1 * time.Second)
+		}
+	}
+	return engine.StatusOK
+}
+
+// FIXME: this should be private, and every outside subsystem
+// should go through the "container_attach" job. But that would require
+// that job to be properly documented, as well as the relationship between
+// Attach and ContainerAttach.
+//
+// This method is in use by builder/builder.go.
 func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
 	var (
 		cStdout, cStderr io.ReadCloser
@@ -19,8 +132,8 @@
 			errors <- err
 		} else {
 			go func() {
-				utils.Debugf("attach: stdin: begin")
-				defer utils.Debugf("attach: stdin: end")
+				log.Debugf("attach: stdin: begin")
+				defer log.Debugf("attach: stdin: end")
 				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
 				if container.Config.StdinOnce && !container.Config.Tty {
 					defer cStdin.Close()
@@ -43,7 +156,7 @@
 					err = nil
 				}
 				if err != nil {
-					utils.Errorf("attach: stdin: %s", err)
+					log.Errorf("attach: stdin: %s", err)
 				}
 				errors <- err
 			}()
@@ -56,8 +169,8 @@
 		} else {
 			cStdout = p
 			go func() {
-				utils.Debugf("attach: stdout: begin")
-				defer utils.Debugf("attach: stdout: end")
+				log.Debugf("attach: stdout: begin")
+				defer log.Debugf("attach: stdout: end")
 				// If we are in StdinOnce mode, then close stdin
 				if container.Config.StdinOnce && stdin != nil {
 					defer stdin.Close()
@@ -70,7 +183,7 @@
 					err = nil
 				}
 				if err != nil {
-					utils.Errorf("attach: stdout: %s", err)
+					log.Errorf("attach: stdout: %s", err)
 				}
 				errors <- err
 			}()
@@ -81,7 +194,7 @@
 				defer stdinCloser.Close()
 			}
 			if cStdout, err := container.StdoutPipe(); err != nil {
-				utils.Errorf("attach: stdout pipe: %s", err)
+				log.Errorf("attach: stdout pipe: %s", err)
 			} else {
 				io.Copy(&utils.NopWriter{}, cStdout)
 			}
@@ -94,8 +207,8 @@
 		} else {
 			cStderr = p
 			go func() {
-				utils.Debugf("attach: stderr: begin")
-				defer utils.Debugf("attach: stderr: end")
+				log.Debugf("attach: stderr: begin")
+				defer log.Debugf("attach: stderr: end")
 				// If we are in StdinOnce mode, then close stdin
 				if container.Config.StdinOnce && stdin != nil {
 					defer stdin.Close()
@@ -108,7 +221,7 @@
 					err = nil
 				}
 				if err != nil {
-					utils.Errorf("attach: stderr: %s", err)
+					log.Errorf("attach: stderr: %s", err)
 				}
 				errors <- err
 			}()
@@ -120,7 +233,7 @@
 			}
 
 			if cStderr, err := container.StderrPipe(); err != nil {
-				utils.Errorf("attach: stdout pipe: %s", err)
+				log.Errorf("attach: stdout pipe: %s", err)
 			} else {
 				io.Copy(&utils.NopWriter{}, cStderr)
 			}
@@ -140,14 +253,14 @@
 		// FIXME: how to clean up the stdin goroutine without the unwanted side effect
 		// of closing the passed stdin? Add an intermediary io.Pipe?
 		for i := 0; i < nJobs; i += 1 {
-			utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
+			log.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
 			if err := <-errors; err != nil {
-				utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
+				log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
 				return err
 			}
-			utils.Debugf("attach: job %d completed successfully", i+1)
+			log.Debugf("attach: job %d completed successfully", i+1)
 		}
-		utils.Debugf("attach: all jobs completed successfully")
+		log.Debugf("attach: all jobs completed successfully")
 		return nil
 	})
 }
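ContainerAttach reads everything from the job environment (the logs, stream,
stdin, stdout and stderr booleans), so several CLI verbs can map onto the one
job. For example (the flag-to-env mapping is inferred from the handler above,
not shown in this diff):

    docker logs -f mycontainer              # logs + stream, no stdin hijack
    docker attach --no-stdin mycontainer    # stream only, stdin disabled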
diff --git a/server/buildfile.go b/daemon/build.go
similarity index 82%
rename from server/buildfile.go
rename to daemon/build.go
index 71fed66..a572dc2 100644
--- a/server/buildfile.go
+++ b/daemon/build.go
@@ -1,4 +1,4 @@
-package server
+package daemon
 
 import (
 	"crypto/sha256"
@@ -10,6 +10,7 @@
 	"io/ioutil"
 	"net/url"
 	"os"
+	"os/exec"
 	"path"
 	"path/filepath"
 	"reflect"
@@ -19,16 +20,99 @@
 	"syscall"
 	"time"
 
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/pkg/symlink"
-	"github.com/dotcloud/docker/pkg/system"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
+func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status {
+	if len(job.Args) != 0 {
+		return job.Errorf("Usage: %s\n", job.Name)
+	}
+	var (
+		remoteURL      = job.Getenv("remote")
+		repoName       = job.Getenv("t")
+		suppressOutput = job.GetenvBool("q")
+		noCache        = job.GetenvBool("nocache")
+		rm             = job.GetenvBool("rm")
+		forceRm        = job.GetenvBool("forcerm")
+		authConfig     = &registry.AuthConfig{}
+		configFile     = &registry.ConfigFile{}
+		tag            string
+		context        io.ReadCloser
+	)
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("configFile", configFile)
+	repoName, tag = parsers.ParseRepositoryTag(repoName)
+
+	if remoteURL == "" {
+		context = ioutil.NopCloser(job.Stdin)
+	} else if utils.IsGIT(remoteURL) {
+		if !strings.HasPrefix(remoteURL, "git://") {
+			remoteURL = "https://" + remoteURL
+		}
+		root, err := ioutil.TempDir("", "docker-build-git")
+		if err != nil {
+			return job.Error(err)
+		}
+		defer os.RemoveAll(root)
+
+		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
+			return job.Errorf("Error trying to use git: %s (%s)", err, output)
+		}
+
+		c, err := archive.Tar(root, archive.Uncompressed)
+		if err != nil {
+			return job.Error(err)
+		}
+		context = c
+	} else if utils.IsURL(remoteURL) {
+		f, err := utils.Download(remoteURL)
+		if err != nil {
+			return job.Error(err)
+		}
+		defer f.Body.Close()
+		dockerFile, err := ioutil.ReadAll(f.Body)
+		if err != nil {
+			return job.Error(err)
+		}
+		c, err := archive.Generate("Dockerfile", string(dockerFile))
+		if err != nil {
+			return job.Error(err)
+		}
+		context = c
+	}
+	defer context.Close()
+
+	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
+	b := NewBuildFile(daemon, daemon.eng,
+		&utils.StdoutFormater{
+			Writer:          job.Stdout,
+			StreamFormatter: sf,
+		},
+		&utils.StderrFormater{
+			Writer:          job.Stdout,
+			StreamFormatter: sf,
+		},
+		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
+	id, err := b.Build(context)
+	if err != nil {
+		return job.Error(err)
+	}
+	if repoName != "" {
+		daemon.Repositories().Set(repoName, tag, id, false)
+	}
+	return engine.StatusOK
+}
+
 var (
 	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
 )
@@ -40,15 +124,15 @@
 }
 
 type buildFile struct {
-	daemon *daemon.Daemon
-	srv    *Server
+	daemon *Daemon
+	eng    *engine.Engine
 
 	image      string
 	maintainer string
 	config     *runconfig.Config
 
 	contextPath string
-	context     *utils.TarSum
+	context     *tarsum.TarSum
 
 	verbose      bool
 	utilizeCache bool
@@ -67,6 +151,9 @@
 	// Deprecated, original writer used for ImagePull. To be removed.
 	outOld io.Writer
 	sf     *utils.StreamFormatter
+
+	// cmdSet indicates whether CMD was set in the current Dockerfile
+	cmdSet bool
 }
 
 func (b *buildFile) clearTmp(containers map[string]struct{}) {
@@ -85,7 +172,7 @@
 	image, err := b.daemon.Repositories().LookupImage(name)
 	if err != nil {
 		if b.daemon.Graph().IsNotExist(err) {
-			remote, tag := utils.ParseRepositoryTag(name)
+			remote, tag := parsers.ParseRepositoryTag(name)
 			pullRegistryAuth := b.authConfig
 			if len(b.configFile.Configs) > 0 {
 				// The request came with a full auth config file, we prefer to use that
@@ -96,7 +183,7 @@
 				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
 				pullRegistryAuth = &resolvedAuth
 			}
-			job := b.srv.Eng.Job("pull", remote, tag)
+			job := b.eng.Job("pull", remote, tag)
 			job.SetenvBool("json", b.sf.Json())
 			job.SetenvBool("parallel", true)
 			job.SetenvJson("authConfig", pullRegistryAuth)
@@ -118,7 +205,7 @@
 		b.config = image.Config
 	}
 	if b.config.Env == nil || len(b.config.Env) == 0 {
-		b.config.Env = append(b.config.Env, "HOME=/", "PATH="+daemon.DefaultPathEnv)
+		b.config.Env = append(b.config.Env, "PATH="+DefaultPathEnv)
 	}
 	// Process ONBUILD triggers if they exist
 	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
@@ -167,20 +254,20 @@
 
 // probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
 // and if so attempts to look up the current `b.image` and `b.config` pair
-// in the current server `b.srv`. If an image is found, probeCache returns
+// in the current server `b.daemon`. If an image is found, probeCache returns
 // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
 // is any error, it returns `(false, err)`.
 func (b *buildFile) probeCache() (bool, error) {
 	if b.utilizeCache {
-		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
+		if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil {
 			return false, err
 		} else if cache != nil {
 			fmt.Fprintf(b.outStream, " ---> Using cache\n")
-			utils.Debugf("[BUILDER] Use cached version")
+			log.Debugf("[BUILDER] Use cached version")
 			b.image = cache.ID
 			return true, nil
 		} else {
-			utils.Debugf("[BUILDER] Cache miss")
+			log.Debugf("[BUILDER] Cache miss")
 		}
 	}
 	return false, nil
@@ -196,12 +283,13 @@
 	}
 
 	cmd := b.config.Cmd
-	b.config.Cmd = nil
+	// set Cmd manually; this is a special case only for Dockerfiles
+	b.config.Cmd = config.Cmd
 	runconfig.Merge(b.config, config)
 
 	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
 
-	utils.Debugf("Command to be executed: %v", b.config.Cmd)
+	log.Debugf("Command to be executed: %v", b.config.Cmd)
 
 	hit, err := b.probeCache()
 	if err != nil {
@@ -291,7 +379,7 @@
 func (b *buildFile) buildCmdFromJson(args string) []string {
 	var cmd []string
 	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
-		utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
+		log.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
 		cmd = []string{"/bin/sh", "-c", args}
 	}
 	return cmd
@@ -303,12 +391,17 @@
 	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
 		return err
 	}
+	b.cmdSet = true
 	return nil
 }
 
 func (b *buildFile) CmdEntrypoint(args string) error {
 	entrypoint := b.buildCmdFromJson(args)
 	b.config.Entrypoint = entrypoint
+	// if there is no CMD in the current Dockerfile, clean up the inherited cmd
+	if !b.cmdSet {
+		b.config.Cmd = nil
+	}
 	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
 		return err
 	}
@@ -404,7 +497,7 @@
 	return nil
 }
 
-func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
+func (b *buildFile) addContext(container *Container, orig, dest string, decompress bool) error {
 	var (
 		err        error
 		destExists = true
@@ -459,7 +552,7 @@
 		if err := archive.UntarPath(origPath, tarDest); err == nil {
 			return nil
 		} else if err != io.EOF {
-			utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
+			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
 		}
 	}
 
@@ -553,7 +646,7 @@
 		if err != nil {
 			return err
 		}
-		tarSum := &utils.TarSum{Reader: r, DisableCompression: true}
+		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
 		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
 			return err
 		}
@@ -656,7 +749,7 @@
 	return b.runContextCommand(args, true, true, "ADD")
 }
 
-func (b *buildFile) create() (*daemon.Container, error) {
+func (b *buildFile) create() (*Container, error) {
 	if b.image == "" {
 		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
@@ -677,10 +770,15 @@
 	return c, nil
 }
 
-func (b *buildFile) run(c *daemon.Container) error {
+func (b *buildFile) run(c *Container) error {
 	var errCh chan error
 	if b.verbose {
 		errCh = utils.Go(func() error {
+			// FIXME: call the 'attach' job so that daemon.Attach can be made private
+			//
+			// FIXME (LK4D4): it may also make sense to call the "logs" job, which is like attach
+			// but without hijacking for stdin. Also, with attach there can be a race
+			// condition because some output may already have been printed before it attaches.
 			return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream)
 		})
 	}
@@ -775,7 +873,7 @@
 		return "", err
 	}
 
-	b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true}
+	b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true}
 	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
 		return "", err
 	}
@@ -889,10 +987,10 @@
 	})
 }
 
-func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
+func NewBuildFile(d *Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
 	return &buildFile{
-		daemon:        srv.daemon,
-		srv:           srv,
+		daemon:        d,
+		eng:           eng,
 		config:        &runconfig.Config{},
 		outStream:     outStream,
 		errStream:     errStream,
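CmdBuild accepts its context three ways: straight from job.Stdin when no
remote is given, via a recursive `git clone` when the remote looks like a git
URL (bare remotes get https:// prepended), or as a single downloaded
Dockerfile for other URLs. In CLI terms, roughly:

    docker build - < context.tar                 # context streamed over stdin
    docker build github.com/docker/docker        # git remote, cloned with --recursive
    docker build http://example.com/Dockerfile   # lone Dockerfile, wrapped into a context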
diff --git a/daemon/changes.go b/daemon/changes.go
new file mode 100644
index 0000000..1e5726e
--- /dev/null
+++ b/daemon/changes.go
@@ -0,0 +1,32 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 {
+		return job.Errorf("Usage: %s CONTAINER", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		outs := engine.NewTable("", 0)
+		changes, err := container.Changes()
+		if err != nil {
+			return job.Error(err)
+		}
+		for _, change := range changes {
+			out := &engine.Env{}
+			if err := out.Import(change); err != nil {
+				return job.Error(err)
+			}
+			outs.Add(out)
+		}
+		if _, err := outs.WriteListTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+	} else {
+		return job.Errorf("No such container: %s", name)
+	}
+	return engine.StatusOK
+}
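The table written by ContainerChanges is presumably what backs the CLI's diff
subcommand:

    docker diff mycontainer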
diff --git a/daemon/commit.go b/daemon/commit.go
new file mode 100644
index 0000000..950925a
--- /dev/null
+++ b/daemon/commit.go
@@ -0,0 +1,84 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+	}
+	name := job.Args[0]
+
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+
+	var (
+		config    = container.Config
+		newConfig runconfig.Config
+	)
+
+	if err := job.GetenvJson("config", &newConfig); err != nil {
+		return job.Error(err)
+	}
+
+	if err := runconfig.Merge(&newConfig, config); err != nil {
+		return job.Error(err)
+	}
+
+	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
+	if err != nil {
+		return job.Error(err)
+	}
+	job.Printf("%s\n", img.ID)
+	return engine.StatusOK
+}
+
+// Commit creates a new filesystem image from the current state of a container.
+// The image can optionally be tagged into a repository
+func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
+	if pause {
+		container.Pause()
+		defer container.Unpause()
+	}
+
+	if err := container.Mount(); err != nil {
+		return nil, err
+	}
+	defer container.Unmount()
+
+	rwTar, err := container.ExportRw()
+	if err != nil {
+		return nil, err
+	}
+	defer rwTar.Close()
+
+	// Create a new image from the container's base layers + a new layer from container changes
+	var (
+		containerID, containerImage string
+		containerConfig             *runconfig.Config
+	)
+
+	if container != nil {
+		containerID = container.ID
+		containerImage = container.Image
+		containerConfig = container.Config
+	}
+
+	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
+	if err != nil {
+		return nil, err
+	}
+
+	// Register the image if needed
+	if repository != "" {
+		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
+			return img, err
+		}
+	}
+	return img, nil
+}
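ContainerCommit pulls repo, tag, comment, author and pause from the job
environment and merges any `config` JSON over the container's config before
committing. A typical invocation of the corresponding CLI verb (the --pause
flag is an assumption based on job.GetenvBool("pause")):

    docker commit --author='Jane Doe <jane@example.com>' -m 'baked-in config' mycontainer myrepo:mytag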
diff --git a/daemon/config.go b/daemon/config.go
new file mode 100644
index 0000000..a396bd0
--- /dev/null
+++ b/daemon/config.go
@@ -0,0 +1,70 @@
+package daemon
+
+import (
+	"net"
+
+	"github.com/docker/docker/daemon/networkdriver"
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+const (
+	defaultNetworkMtu    = 1500
+	DisableNetworkBridge = "none"
+)
+
+// Config defines the configuration of a docker daemon.
+// These are the configuration settings that you pass
+// to the docker daemon when you launch it with, say, `docker -d -e lxc`
+// FIXME: separate runtime configuration from http api configuration
+type Config struct {
+	Pidfile                     string
+	Root                        string
+	AutoRestart                 bool
+	Dns                         []string
+	DnsSearch                   []string
+	EnableIptables              bool
+	EnableIpForward             bool
+	DefaultIp                   net.IP
+	BridgeIface                 string
+	BridgeIP                    string
+	InterContainerCommunication bool
+	GraphDriver                 string
+	GraphOptions                []string
+	ExecDriver                  string
+	Mtu                         int
+	DisableNetwork              bool
+	EnableSelinuxSupport        bool
+	Context                     map[string][]string
+}
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+// Subsequent calls to `flag.Parse` will populate config with values parsed
+// from the command-line.
+func (config *Config) InstallFlags() {
+	flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
+	flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
+	flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
+	flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
+	flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
+	flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
+	flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
+	flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
+	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
+	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
+	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver")
+	flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers' network MTU\nif no value is provided, default to the default route MTU or 1500 if no default route is available")
+	opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
+	opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
+	// FIXME: why the inconsistency between "hosts" and "sockets"?
+	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
+	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
+}
+
+func GetDefaultNetworkMtu() int {
+	if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
+		return iface.MTU
+	}
+	return defaultNetworkMtu
+}
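Taken together, InstallFlags means a daemon launch line can mix the short,
long, and deprecated (#-prefixed) spellings registered above, for example:

    docker -d --selinux-enabled -s devicemapper --bip=172.17.42.1/16 --dns=8.8.8.8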
diff --git a/daemon/container.go b/daemon/container.go
index 30337de..df6bd66 100644
--- a/daemon/container.go
+++ b/daemon/container.go
@@ -6,7 +6,6 @@
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -17,18 +16,21 @@
 
 	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/links"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/pkg/networkfs/etchosts"
-	"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
-	"github.com/dotcloud/docker/pkg/symlink"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/links"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/networkfs/etchosts"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -66,13 +68,14 @@
 	ExecDriver     string
 
 	command   *execdriver.Command
-	stdout    *utils.WriteBroadcaster
-	stderr    *utils.WriteBroadcaster
+	stdout    *broadcastwriter.BroadcastWriter
+	stderr    *broadcastwriter.BroadcastWriter
 	stdin     io.ReadCloser
 	stdinPipe io.WriteCloser
 
 	daemon                   *Daemon
 	MountLabel, ProcessLabel string
+	RestartCount             int
 
 	Volumes map[string]string
 	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
@@ -81,6 +84,7 @@
 	hostConfig *runconfig.HostConfig
 
 	activeLinks map[string]*links.Link
+	monitor     *containerMonitor
 }
 
 func (container *Container) FromDisk() error {
@@ -105,7 +109,7 @@
 	return container.readHostConfig()
 }
 
-func (container *Container) ToDisk() error {
+func (container *Container) toDisk() error {
 	data, err := json.Marshal(container)
 	if err != nil {
 		return err
@@ -124,6 +128,13 @@
 	return container.WriteHostConfig()
 }
 
+func (container *Container) ToDisk() error {
+	container.Lock()
+	err := container.toDisk()
+	container.Unlock()
+	return err
+}
+
 func (container *Container) readHostConfig() error {
 	container.hostConfig = &runconfig.HostConfig{}
 	// If the hostconfig file does not exist, do not read it.
@@ -160,6 +171,13 @@
 	return ioutil.WriteFile(pth, data, 0666)
 }
 
+func (container *Container) LogEvent(action string) {
+	d := container.daemon
+	if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil {
+		log.Errorf("Error logging event %s for %s: %s", action, container.ID, err)
+	}
+}
+
 func (container *Container) getResourcePath(path string) (string, error) {
 	cleanPath := filepath.Join("/", path)
 	return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
@@ -208,6 +226,20 @@
 		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
 	}
 
+	// Build lists of devices allowed and created within the container.
+	userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices))
+	for i, deviceMapping := range c.hostConfig.Devices {
+		device, err := devices.GetDevice(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
+		if err != nil {
+			return fmt.Errorf("error gathering device information while adding custom device: %s", err)
+		}
+		device.Path = deviceMapping.PathInContainer
+		userSpecifiedDevices[i] = device
+	}
+	allowedDevices := append(devices.DefaultAllowedDevices, userSpecifiedDevices...)
+
+	autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...)
+
 	// TODO: this can be removed after lxc-conf is fully deprecated
 	mergeLxcConfIntoOptions(c.hostConfig, context)
 
@@ -230,8 +262,10 @@
 		User:               c.Config.User,
 		Config:             context,
 		Resources:          resources,
-		AllowedDevices:     devices.DefaultAllowedDevices,
-		AutoCreatedDevices: devices.DefaultAutoCreatedDevices,
+		AllowedDevices:     allowedDevices,
+		AutoCreatedDevices: autoCreatedDevices,
+		CapAdd:             c.hostConfig.CapAdd,
+		CapDrop:            c.hostConfig.CapDrop,
 	}
 	c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
 	c.command.Env = env
@@ -245,6 +279,7 @@
 	if container.State.IsRunning() {
 		return nil
 	}
+
 	// if we encounter and error during start we need to ensure that any other
 	// setup has been cleaned up properly
 	defer func() {
@@ -280,9 +315,6 @@
 	if err := setupMountsForContainer(container); err != nil {
 		return err
 	}
-	if err := container.startLoggingToDisk(); err != nil {
-		return err
-	}
 
 	return container.waitForStart()
 }
@@ -463,40 +495,8 @@
 	container.NetworkSettings = &NetworkSettings{}
 }
 
-func (container *Container) monitor(callback execdriver.StartCallback) error {
-	var (
-		err      error
-		exitCode int
-	)
-
-	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
-	exitCode, err = container.daemon.Run(container, pipes, callback)
-	if err != nil {
-		utils.Errorf("Error running container: %s", err)
-	}
-	container.State.SetStopped(exitCode)
-
-	// Cleanup
-	container.cleanup()
-
-	// Re-create a brand new stdin pipe once the container exited
-	if container.Config.OpenStdin {
-		container.stdin, container.stdinPipe = io.Pipe()
-	}
-	if container.daemon != nil && container.daemon.srv != nil {
-		container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
-	}
-	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
-		// FIXME: here is race condition between two RUN instructions in Dockerfile
-		// because they share same runconfig and change image. Must be fixed
-		// in server/buildfile.go
-		if err := container.ToDisk(); err != nil {
-			utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
-		}
-	}
-	return err
-}
-
+// cleanup releases any network resources allocated to the container along with any rules
+// around how containers are linked together.  It also unmounts the container's root filesystem.
 func (container *Container) cleanup() {
 	container.releaseNetwork()
 
@@ -506,30 +506,14 @@
 			link.Disable()
 		}
 	}
-	if container.Config.OpenStdin {
-		if err := container.stdin.Close(); err != nil {
-			utils.Errorf("%s: Error close stdin: %s", container.ID, err)
-		}
-	}
-	if err := container.stdout.CloseWriters(); err != nil {
-		utils.Errorf("%s: Error close stdout: %s", container.ID, err)
-	}
-	if err := container.stderr.CloseWriters(); err != nil {
-		utils.Errorf("%s: Error close stderr: %s", container.ID, err)
-	}
-	if container.command != nil && container.command.Terminal != nil {
-		if err := container.command.Terminal.Close(); err != nil {
-			utils.Errorf("%s: Error closing terminal: %s", container.ID, err)
-		}
-	}
 
 	if err := container.Unmount(); err != nil {
-		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
+		log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
 	}
 }
 
 func (container *Container) KillSig(sig int) error {
-	utils.Debugf("Sending %d to %s", sig, container.ID)
+	log.Debugf("Sending %d to %s", sig, container.ID)
 	container.Lock()
 	defer container.Unlock()
 
@@ -541,6 +525,18 @@
 	if !container.State.IsRunning() {
 		return nil
 	}
+
+	// signal to the monitor that it should not restart the container
+	// after we send the kill signal
+	container.monitor.ExitOnNext()
+
+	// if the container is currently restarting we do not need to send the signal
+	// to the process. Telling the monitor that it should exit on its next event
+	// loop is enough
+	if container.State.IsRestarting() {
+		return nil
+	}
+
 	return container.daemon.Kill(container, sig)
 }
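
The containerMonitor type is introduced elsewhere in this patch; a hypothetical reduction of the contract KillSig depends on (every name except ExitOnNext is illustrative):

	type containerMonitor struct {
		mux        sync.Mutex
		shouldStop bool // consulted once per restart iteration
	}

	// ExitOnNext tells the monitor not to restart the container once the
	// current process exits, so it cannot race the kill signal.
	func (m *containerMonitor) ExitOnNext() {
		m.mux.Lock()
		m.shouldStop = true
		m.mux.Unlock()
	}
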
 
@@ -578,7 +574,7 @@
 	if _, err := container.State.WaitStop(10 * time.Second); err != nil {
 		// Ensure that we don't kill ourselves
 		if pid := container.State.GetPid(); pid != 0 {
-			log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
+			log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
 			if err := syscall.Kill(pid, 9); err != nil {
 				return err
 			}
@@ -596,7 +592,7 @@
 
 	// 1. Send a SIGTERM
 	if err := container.KillSig(15); err != nil {
-		log.Print("Failed to send SIGTERM to the process, force killing")
+		log.Infof("Failed to send SIGTERM to the process, force killing")
 		if err := container.KillSig(9); err != nil {
 			return err
 		}
@@ -604,7 +600,7 @@
 
 	// 2. Wait for the process to exit on its own
 	if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil {
-		log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
+		log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
 		// 3. If it doesn't, then send SIGKILL
 		if err := container.Kill(); err != nil {
 			container.State.WaitStop(-1 * time.Second)
@@ -733,7 +729,7 @@
 	)
 
 	if err := container.Mount(); err != nil {
-		utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
+		log.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
 		return sizeRw, sizeRootfs
 	}
 	defer container.Unmount()
@@ -741,7 +737,7 @@
 	if differ, ok := container.daemon.driver.(graphdriver.Differ); ok {
 		sizeRw, err = differ.DiffSize(container.ID)
 		if err != nil {
-			utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
+			log.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
 			// FIXME: GetSize should return an error. Not changing it now in case
 			// there is a side-effect.
 			sizeRw = -1
@@ -838,7 +834,7 @@
 		if link, exists := container.activeLinks[name]; exists {
 			link.Disable()
 		} else {
-			utils.Debugf("Could not find active link for %s", name)
+			log.Debugf("Could not find active link for %s", name)
 		}
 	}
 }
@@ -853,18 +849,16 @@
 		daemon = container.daemon
 	)
 
-	if config.NetworkMode == "host" {
-		container.ResolvConfPath = "/etc/resolv.conf"
-		return nil
-	}
-
 	resolvConf, err := resolvconf.Get()
 	if err != nil {
 		return err
 	}
+	container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
+	if err != nil {
+		return err
+	}
 
-	// If custom dns exists, then create a resolv.conf for the container
-	if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
+	if config.NetworkMode != "host" && (len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0) {
 		var (
 			dns       = resolvconf.GetNameservers(resolvConf)
 			dnsSearch = resolvconf.GetSearchDomains(resolvConf)
@@ -879,18 +873,9 @@
 		} else if len(daemon.config.DnsSearch) > 0 {
 			dnsSearch = daemon.config.DnsSearch
 		}
-
-		resolvConfPath, err := container.getRootResourcePath("resolv.conf")
-		if err != nil {
-			return err
-		}
-		container.ResolvConfPath = resolvConfPath
-
 		return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
-	} else {
-		container.ResolvConfPath = "/etc/resolv.conf"
 	}
-	return nil
+	return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644)
 }
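
The rewritten DNS setup above now has exactly two outcomes; condensed into a sketch using only helpers referenced in this hunk (resolution of dns/dnsSearch from container and daemon config elided):

	func writeResolvConf(path string, dns, dnsSearch []string) error {
		hostConf, err := resolvconf.Get() // contents of the host's /etc/resolv.conf
		if err != nil {
			return err
		}
		if len(dns) > 0 || len(dnsSearch) > 0 {
			return resolvconf.Build(path, dns, dnsSearch) // container-specific file
		}
		return ioutil.WriteFile(path, hostConf, 0644) // verbatim copy of the host's
	}
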
 
 func (container *Container) initializeNetworking() error {
@@ -950,15 +935,15 @@
 // Make sure the config is compatible with the current kernel
 func (container *Container) verifyDaemonSettings() {
 	if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
-		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
 		container.Config.Memory = 0
 	}
 	if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
-		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		log.Infof("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
 		container.Config.MemorySwap = -1
 	}
 	if container.daemon.sysInfo.IPv4ForwardingDisabled {
-		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
+		log.Infof("WARNING: IPv4 forwarding is disabled. Networking will not work")
 	}
 }
 
@@ -1019,9 +1004,12 @@
 func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
 	// Setup environment
 	env := []string{
-		"HOME=/",
 		"PATH=" + DefaultPathEnv,
 		"HOSTNAME=" + container.Config.Hostname,
+		// Note: we don't set HOME here because it'll get autoset intelligently
+		// based on the value of USER inside dockerinit, but only if it isn't
+		// set already (i.e., it can be overridden by setting HOME via -e or ENV
+		// in a Dockerfile).
 	}
 	if container.Config.Tty {
 		env = append(env, "TERM=xterm")
@@ -1080,38 +1068,16 @@
 }
 
 func (container *Container) waitForStart() error {
-	callback := func(command *execdriver.Command) {
-		if command.Tty {
-			// The callback is called after the process Start()
-			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace
-			// which we close here.
-			if c, ok := command.Stdout.(io.Closer); ok {
-				c.Close()
-			}
-		}
-		container.State.SetRunning(command.Pid())
-		if err := container.ToDisk(); err != nil {
-			utils.Debugf("%s", err)
-		}
-	}
+	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
 
-	// We use a callback here instead of a goroutine and an chan for
-	// syncronization purposes
-	cErr := utils.Go(func() error { return container.monitor(callback) })
-
-	waitStart := make(chan struct{})
-
-	go func() {
-		container.State.WaitRunning(-1 * time.Second)
-		close(waitStart)
-	}()
-
-	// Start should not return until the process is actually running
+	// block until we either receive an error from the initial start of the container's
+	// process or the process is running in the container
 	select {
-	case <-waitStart:
-	case err := <-cErr:
+	case <-container.monitor.startSignal:
+	case err := <-utils.Go(container.monitor.Start):
 		return err
 	}
+
 	return nil
 }
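
The select above races the monitor's startSignal against the error from the monitor goroutine; that utils.Go wraps a func() error and returns a <-chan error is taken from its use here. The idiom in isolation:

	select {
	case <-container.monitor.startSignal:
		// the process is running; Start may return successfully
	case err := <-utils.Go(container.monitor.Start):
		// the initial start failed before the process ever ran
		return err
	}
	return nil
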
 
diff --git a/daemon/container_unit_test.go b/daemon/container_unit_test.go
index 0a8e69a..1b1b934 100644
--- a/daemon/container_unit_test.go
+++ b/daemon/container_unit_test.go
@@ -1,7 +1,7 @@
 package daemon
 
 import (
-	"github.com/dotcloud/docker/nat"
+	"github.com/docker/docker/nat"
 	"testing"
 )
 
@@ -89,6 +89,41 @@
 	}
 }
 
+func TestParseNetworkOptsPublicNoPort(t *testing.T) {
+	ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100"})
+
+	if err == nil {
+		t.Logf("Expected error 'Invalid containerPort'")
+		t.Fail()
+	}
+	if ports != nil {
+		t.Logf("Expected nil got %v", ports)
+		t.Fail()
+	}
+	if bindings != nil {
+		t.Logf("Expected nil got %v", bindings)
+		t.Fail()
+	}
+}
+
+func TestParseNetworkOptsNegativePorts(t *testing.T) {
+	ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:-1:-1"})
+
+	if err == nil {
+		t.Fail()
+	}
+	t.Logf("%v", len(ports))
+	t.Logf("%v", bindings)
+	if len(ports) != 0 {
+		t.Logf("Expected 0 got %d", len(ports))
+		t.Fail()
+	}
+	if len(bindings) != 0 {
+		t.Logf("Expected 0 got %d", len(bindings))
+		t.Fail()
+	}
+}
+
 func TestParseNetworkOptsUdp(t *testing.T) {
 	ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"})
 	if err != nil {
diff --git a/daemon/copy.go b/daemon/copy.go
new file mode 100644
index 0000000..9d18b01
--- /dev/null
+++ b/daemon/copy.go
@@ -0,0 +1,33 @@
+package daemon
+
+import (
+	"io"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
+	if len(job.Args) != 2 {
+		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
+	}
+
+	var (
+		name     = job.Args[0]
+		resource = job.Args[1]
+	)
+
+	if container := daemon.Get(name); container != nil {
+
+		data, err := container.Copy(resource)
+		if err != nil {
+			return job.Error(err)
+		}
+		defer data.Close()
+
+		if _, err := io.Copy(job.Stdout, data); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+	return job.Errorf("No such container: %s", name)
+}
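
A caller would drive this handler through the engine roughly as follows; Job(...).Run() appears throughout this patch, while Stdout.Add as the way to attach an output writer is an assumption, as are the container name and path:

	job := eng.Job("container_copy", "my-container", "/etc/hostname")
	job.Stdout.Add(os.Stdout) // assumed API for wiring job output to a writer
	if err := job.Run(); err != nil {
		log.Errorf("copy failed: %s", err)
	}
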
diff --git a/daemon/create.go b/daemon/create.go
new file mode 100644
index 0000000..3c6827e
--- /dev/null
+++ b/daemon/create.go
@@ -0,0 +1,86 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
+	var name string
+	if len(job.Args) == 1 {
+		name = job.Args[0]
+	} else if len(job.Args) > 1 {
+		return job.Errorf("Usage: %s", job.Name)
+	}
+	config := runconfig.ContainerConfigFromJob(job)
+	if config.Memory != 0 && config.Memory < 524288 {
+		return job.Errorf("Minimum memory limit allowed is 512k")
+	}
+	if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
+		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		config.Memory = 0
+	}
+	if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
+		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		config.MemorySwap = -1
+	}
+	container, buildWarnings, err := daemon.Create(config, name)
+	if err != nil {
+		if daemon.Graph().IsNotExist(err) {
+			_, tag := parsers.ParseRepositoryTag(config.Image)
+			if tag == "" {
+				tag = graph.DEFAULTTAG
+			}
+			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
+		}
+		return job.Error(err)
+	}
+	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
+		job.Errorf("IPv4 forwarding is disabled.\n")
+	}
+	// FIXME: this is necessary because daemon.Create might return a nil container
+	// with a non-nil error. This should not happen! Once it's fixed we
+	// can remove this workaround.
+	if container != nil {
+		container.LogEvent("create")
+		job.Printf("%s\n", container.ID)
+	}
+	for _, warning := range buildWarnings {
+		job.Errorf("%s\n", warning)
+	}
+	return engine.StatusOK
+}
+
+// Create creates a new container from the given configuration with a given name.
+func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
+	var (
+		container *Container
+		warnings  []string
+	)
+
+	img, err := daemon.repositories.LookupImage(config.Image)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err := img.CheckDepth(); err != nil {
+		return nil, nil, err
+	}
+	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
+		return nil, nil, err
+	}
+	if container, err = daemon.newContainer(name, config, img); err != nil {
+		return nil, nil, err
+	}
+	if err := daemon.createRootfs(container, img); err != nil {
+		return nil, nil, err
+	}
+	if err := container.ToDisk(); err != nil {
+		return nil, nil, err
+	}
+	if err := daemon.Register(container); err != nil {
+		return nil, nil, err
+	}
+	return container, warnings, nil
+}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index a94a445..811cb33 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -4,42 +4,40 @@
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"os"
 	"path"
 	"regexp"
+	"runtime"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/docker/libcontainer/label"
-	"github.com/docker/libcontainer/selinux"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/execdriver/execdrivers"
-	"github.com/dotcloud/docker/daemon/execdriver/lxc"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	_ "github.com/dotcloud/docker/daemon/graphdriver/vfs"
-	_ "github.com/dotcloud/docker/daemon/networkdriver/bridge"
-	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
-	"github.com/dotcloud/docker/daemonconfig"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/graph"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/pkg/graphdb"
-	"github.com/dotcloud/docker/pkg/namesgenerator"
-	"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
-	"github.com/dotcloud/docker/pkg/sysinfo"
-	"github.com/dotcloud/docker/pkg/truncindex"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
-)
 
-// Set the max depth to the aufs default that most
-// kernels are compiled with
-// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk
-const MaxImageDepth = 127
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/execdrivers"
+	"github.com/docker/docker/daemon/execdriver/lxc"
+	"github.com/docker/docker/daemon/graphdriver"
+	_ "github.com/docker/docker/daemon/graphdriver/vfs"
+	_ "github.com/docker/docker/daemon/networkdriver/bridge"
+	"github.com/docker/docker/daemon/networkdriver/portallocator"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/graphdb"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/namesgenerator"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/pkg/truncindex"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
 
 var (
 	DefaultDns                = []string{"8.8.8.8", "8.8.4.4"}
@@ -91,38 +89,65 @@
 	idIndex        *truncindex.TruncIndex
 	sysInfo        *sysinfo.SysInfo
 	volumes        *graph.Graph
-	srv            Server
 	eng            *engine.Engine
-	config         *daemonconfig.Config
+	config         *Config
 	containerGraph *graphdb.Database
 	driver         graphdriver.Driver
 	execDriver     execdriver.Driver
-	Sockets        []string
 }
 
 // Install installs daemon capabilities to eng.
 func (daemon *Daemon) Install(eng *engine.Engine) error {
-	return eng.Register("container_inspect", daemon.ContainerInspect)
-}
-
-// List returns an array of all containers registered in the daemon.
-func (daemon *Daemon) List() []*Container {
-	return daemon.containers.List()
+	// FIXME: rename "delete" to "rm" for consistency with the CLI command
+	// FIXME: rename ContainerDestroy to ContainerRm for consistency with the CLI command
+	// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
+	for name, method := range map[string]engine.Handler{
+		"attach":            daemon.ContainerAttach,
+		"build":             daemon.CmdBuild,
+		"commit":            daemon.ContainerCommit,
+		"container_changes": daemon.ContainerChanges,
+		"container_copy":    daemon.ContainerCopy,
+		"container_inspect": daemon.ContainerInspect,
+		"containers":        daemon.Containers,
+		"create":            daemon.ContainerCreate,
+		"delete":            daemon.ContainerDestroy,
+		"export":            daemon.ContainerExport,
+		"info":              daemon.CmdInfo,
+		"kill":              daemon.ContainerKill,
+		"logs":              daemon.ContainerLogs,
+		"pause":             daemon.ContainerPause,
+		"resize":            daemon.ContainerResize,
+		"restart":           daemon.ContainerRestart,
+		"start":             daemon.ContainerStart,
+		"stop":              daemon.ContainerStop,
+		"top":               daemon.ContainerTop,
+		"unpause":           daemon.ContainerUnpause,
+		"wait":              daemon.ContainerWait,
+		"image_delete":      daemon.ImageDelete, // FIXME: see above
+	} {
+		if err := eng.Register(name, method); err != nil {
+			return err
+		}
+	}
+	if err := daemon.Repositories().Install(eng); err != nil {
+		return err
+	}
+	// FIXME: this hack is necessary for legacy integration tests to access
+	// the daemon object.
+	eng.Hack_SetGlobalVar("httpapi.daemon", daemon)
+	return nil
 }
 
 // Get looks for a container by the specified ID or name, and returns it.
 // If the container is not found, or if an error occurs, nil is returned.
 func (daemon *Daemon) Get(name string) *Container {
+	if id, err := daemon.idIndex.Get(name); err == nil {
+		return daemon.containers.Get(id)
+	}
 	if c, _ := daemon.GetByName(name); c != nil {
 		return c
 	}
-
-	id, err := daemon.idIndex.Get(name)
-	if err != nil {
-		return nil
-	}
-
-	return daemon.containers.Get(id)
+	return nil
 }
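
The reordered Get above makes ID lookups, including unambiguous prefixes via the truncindex, take precedence over name lookups; illustrated with hypothetical identifiers:

	c1 := daemon.Get("4e3b5a9d01f2") // full or truncated container ID, via idIndex
	c2 := daemon.Get("4e3b")         // unambiguous ID prefix, also via idIndex
	c3 := daemon.Get("/webserver")   // container name, via GetByName
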
 
 // Exists returns true if a container of the specified ID or name exists,
@@ -142,20 +167,24 @@
 	if err := container.FromDisk(); err != nil {
 		return nil, err
 	}
+
 	if container.ID != id {
 		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
 	}
+
+	container.readHostConfig()
+
 	return container, nil
 }
 
 // Register makes a container object usable by the daemon as <container.ID>
 // This is a wrapper for register
 func (daemon *Daemon) Register(container *Container) error {
-	return daemon.register(container, true, nil)
+	return daemon.register(container, true)
 }
 
 // register makes a container object usable by the daemon as <container.ID>
-func (daemon *Daemon) register(container *Container, updateSuffixarray bool, containersToStart *[]*Container) error {
+func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
 	if container.daemon != nil || daemon.Exists(container.ID) {
 		return fmt.Errorf("Container is already loaded")
 	}
@@ -169,8 +198,8 @@
 	container.daemon = daemon
 
 	// Attach to stdout and stderr
-	container.stderr = utils.NewWriteBroadcaster()
-	container.stdout = utils.NewWriteBroadcaster()
+	container.stderr = broadcastwriter.New()
+	container.stdout = broadcastwriter.New()
 	// Attach to stdin
 	if container.Config.OpenStdin {
 		container.stdin, container.stdinPipe = io.Pipe()
@@ -188,7 +217,7 @@
 	//        if so, then we need to restart monitor and init a new lock
 	// If the container is supposed to be running, make sure of it
 	if container.State.IsRunning() {
-		utils.Debugf("killing old running container %s", container.ID)
+		log.Debugf("killing old running container %s", container.ID)
 
 		existingPid := container.State.Pid
 		container.State.SetStopped(0)
@@ -205,36 +234,28 @@
 			var err error
 			cmd.Process, err = os.FindProcess(existingPid)
 			if err != nil {
-				utils.Debugf("cannot find existing process for %d", existingPid)
+				log.Debugf("cannot find existing process for %d", existingPid)
 			}
 			daemon.execDriver.Terminate(cmd)
 		}
 
 		if err := container.Unmount(); err != nil {
-			utils.Debugf("unmount error %s", err)
+			log.Debugf("unmount error %s", err)
 		}
 		if err := container.ToDisk(); err != nil {
-			utils.Debugf("saving stopped state to disk %s", err)
+			log.Debugf("saving stopped state to disk %s", err)
 		}
 
 		info := daemon.execDriver.Info(container.ID)
 		if !info.IsRunning() {
-			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
+			log.Debugf("Container %s was supposed to be running but is not.", container.ID)
 
-			utils.Debugf("Marking as stopped")
+			log.Debugf("Marking as stopped")
 
 			container.State.SetStopped(-127)
 			if err := container.ToDisk(); err != nil {
 				return err
 			}
-
-			if daemon.config.AutoRestart {
-				utils.Debugf("Marking as restarting")
-
-				if containersToStart != nil {
-					*containersToStart = append(*containersToStart, container)
-				}
-			}
 		}
 	}
 	return nil
@@ -249,13 +270,13 @@
 		container.Name = name
 
 		if err := container.ToDisk(); err != nil {
-			utils.Debugf("Error saving container name %s", err)
+			log.Debugf("Error saving container name %s", err)
 		}
 	}
 	return nil
 }
 
-func (daemon *Daemon) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
+func (daemon *Daemon) LogToDisk(src *broadcastwriter.BroadcastWriter, dst, stream string) error {
 	log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
 	if err != nil {
 		return err
@@ -264,56 +285,15 @@
 	return nil
 }
 
-// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
-func (daemon *Daemon) Destroy(container *Container) error {
-	if container == nil {
-		return fmt.Errorf("The given container is <nil>")
-	}
-
-	element := daemon.containers.Get(container.ID)
-	if element == nil {
-		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
-	}
-
-	if err := container.Stop(3); err != nil {
-		return err
-	}
-
-	// Deregister the container before removing its directory, to avoid race conditions
-	daemon.idIndex.Delete(container.ID)
-	daemon.containers.Delete(container.ID)
-
-	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
-		utils.Debugf("Unable to remove container from link graph: %s", err)
-	}
-
-	if err := daemon.driver.Remove(container.ID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
-	}
-
-	initID := fmt.Sprintf("%s-init", container.ID)
-	if err := daemon.driver.Remove(initID); err != nil {
-		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
-	}
-
-	if err := os.RemoveAll(container.root); err != nil {
-		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
-	}
-	selinux.FreeLxcContexts(container.ProcessLabel)
-
-	return nil
-}
-
 func (daemon *Daemon) restore() error {
 	var (
-		debug             = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
-		containers        = make(map[string]*Container)
-		currentDriver     = daemon.driver.String()
-		containersToStart = []*Container{}
+		debug         = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
+		containers    = make(map[string]*Container)
+		currentDriver = daemon.driver.String()
 	)
 
 	if !debug {
-		fmt.Printf("Loading containers: ")
+		log.Infof("Loading containers: ")
 	}
 	dir, err := ioutil.ReadDir(daemon.repository)
 	if err != nil {
@@ -327,29 +307,38 @@
 			fmt.Print(".")
 		}
 		if err != nil {
-			utils.Errorf("Failed to load container %v: %v", id, err)
+			log.Errorf("Failed to load container %v: %v", id, err)
 			continue
 		}
 
 		// Ignore the container if it does not support the current driver being used by the graph
-		if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver {
-			utils.Debugf("Loaded container %v", container.ID)
+		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
+			log.Debugf("Loaded container %v", container.ID)
+
 			containers[container.ID] = container
 		} else {
-			utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
+			log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
 		}
 	}
 
+	registeredContainers := []*Container{}
+
 	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
 			if !debug {
 				fmt.Print(".")
 			}
+
 			e := entities[p]
+
 			if container, ok := containers[e.ID()]; ok {
-				if err := daemon.register(container, false, &containersToStart); err != nil {
-					utils.Debugf("Failed to register container %s: %s", container.ID, err)
+				if err := daemon.register(container, false); err != nil {
+					log.Debugf("Failed to register container %s: %s", container.ID, err)
 				}
+
+				registeredContainers = append(registeredContainers, container)
+
+				// delete from the map so that a new name is not automatically generated
 				delete(containers, e.ID())
 			}
 		}
@@ -360,72 +349,40 @@
 		// Try to set the default name for a container if it exists prior to links
 		container.Name, err = daemon.generateNewName(container.ID)
 		if err != nil {
-			utils.Debugf("Setting default id - %s", err)
+			log.Debugf("Setting default id - %s", err)
 		}
-		if err := daemon.register(container, false, &containersToStart); err != nil {
-			utils.Debugf("Failed to register container %s: %s", container.ID, err)
+
+		if err := daemon.register(container, false); err != nil {
+			log.Debugf("Failed to register container %s: %s", container.ID, err)
 		}
+
+		registeredContainers = append(registeredContainers, container)
 	}
 
-	for _, container := range containersToStart {
-		utils.Debugf("Starting container %d", container.ID)
-		if err := container.Start(); err != nil {
-			utils.Debugf("Failed to start container %s: %s", container.ID, err)
+	// check the restart policy on the containers and restart any container
+	// whose policy is "always", or "on-failure" when it exited with a non-zero code
+	if daemon.config.AutoRestart {
+		log.Debugf("Restarting containers...")
+
+		for _, container := range registeredContainers {
+			if container.hostConfig.RestartPolicy.Name == "always" ||
+				(container.hostConfig.RestartPolicy.Name == "on-failure" && container.State.ExitCode != 0) {
+				log.Debugf("Starting container %s", container.ID)
+
+				if err := container.Start(); err != nil {
+					log.Debugf("Failed to start container %s: %s", container.ID, err)
+				}
+			}
 		}
 	}
 
 	if !debug {
-		fmt.Printf(": done.\n")
+		log.Infof(": done.")
 	}
 
 	return nil
 }
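
The restore-time restart rule above, factored into a predicate for clarity (a sketch; only fields that appear in this patch are used):

	func shouldAutoRestart(c *Container) bool {
		p := c.hostConfig.RestartPolicy
		return p.Name == "always" ||
			(p.Name == "on-failure" && c.State.ExitCode != 0)
	}
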
 
-// Create creates a new container from the given configuration with a given name.
-func (daemon *Daemon) Create(config *runconfig.Config, name string) (*Container, []string, error) {
-	var (
-		container *Container
-		warnings  []string
-	)
-
-	img, err := daemon.repositories.LookupImage(config.Image)
-	if err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.checkImageDepth(img); err != nil {
-		return nil, nil, err
-	}
-	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
-		return nil, nil, err
-	}
-	if container, err = daemon.newContainer(name, config, img); err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.createRootfs(container, img); err != nil {
-		return nil, nil, err
-	}
-	if err := container.ToDisk(); err != nil {
-		return nil, nil, err
-	}
-	if err := daemon.Register(container); err != nil {
-		return nil, nil, err
-	}
-	return container, warnings, nil
-}
-
-func (daemon *Daemon) checkImageDepth(img *image.Image) error {
-	// We add 2 layers to the depth because the container's rw and
-	// init layer add to the restriction
-	depth, err := img.Depth()
-	if err != nil {
-		return err
-	}
-	if depth+2 >= MaxImageDepth {
-		return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
-	}
-	return nil
-}
-
 func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
 	if config != nil {
 		if config.PortSpecs != nil {
@@ -618,51 +575,6 @@
 	return nil
 }
 
-// Commit creates a new filesystem image from the current state of a container.
-// The image can optionally be tagged into a repository
-func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
-	if pause {
-		container.Pause()
-		defer container.Unpause()
-	}
-
-	if err := container.Mount(); err != nil {
-		return nil, err
-	}
-	defer container.Unmount()
-
-	rwTar, err := container.ExportRw()
-	if err != nil {
-		return nil, err
-	}
-	defer rwTar.Close()
-
-	// Create a new image from the container's base layers + a new layer from container changes
-	var (
-		containerID, containerImage string
-		containerConfig             *runconfig.Config
-	)
-
-	if container != nil {
-		containerID = container.ID
-		containerImage = container.Image
-		containerConfig = container.Config
-	}
-
-	img, err := daemon.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
-	if err != nil {
-		return nil, err
-	}
-
-	// Register the image if needed
-	if repository != "" {
-		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
-			return img, err
-		}
-	}
-	return img, nil
-}
-
 func GetFullContainerName(name string) (string, error) {
 	if name == "" {
 		return "", fmt.Errorf("Container name cannot be empty")
@@ -723,7 +635,7 @@
 func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
 	if hostConfig != nil && hostConfig.Links != nil {
 		for _, l := range hostConfig.Links {
-			parts, err := utils.PartParser("name:alias", l)
+			parts, err := parsers.PartParser("name:alias", l)
 			if err != nil {
 				return err
 			}
@@ -750,7 +662,7 @@
 }
 
 // FIXME: harmonize with NewGraph()
-func NewDaemon(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
+func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) {
 	daemon, err := NewDaemonFromDirectory(config, eng)
 	if err != nil {
 		return nil, err
@@ -758,11 +670,71 @@
 	return daemon, nil
 }
 
-func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Daemon, error) {
-	if !config.EnableSelinuxSupport {
-		selinux.SetDisabled()
+func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) {
+	// Apply configuration defaults
+	if config.Mtu == 0 {
+		// FIXME: GetDefaultNetworkMtu doesn't need to be public anymore
+		config.Mtu = GetDefaultNetworkMtu()
+	}
+	// Check for mutually incompatible config options
+	if config.BridgeIface != "" && config.BridgeIP != "" {
+		return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.")
+	}
+	if !config.EnableIptables && !config.InterContainerCommunication {
+		return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
+	}
+	// FIXME: DisableNetworkBridge doesn't need to be public anymore
+	config.DisableNetwork = config.BridgeIface == DisableNetworkBridge
+
+	// Claim the pidfile first, to avoid any and all unexpected race conditions.
+	// Some of the init doesn't need a pidfile lock - but let's not try to be smart.
+	if config.Pidfile != "" {
+		if err := utils.CreatePidFile(config.Pidfile); err != nil {
+			return nil, err
+		}
+		eng.OnShutdown(func() {
+			// Always release the pidfile last, just in case
+			utils.RemovePidFile(config.Pidfile)
+		})
 	}
 
+	// Check that the system is supported and we have sufficient privileges
+	// FIXME: return errors instead of calling Fatal
+	if runtime.GOOS != "linux" {
+		log.Fatalf("The Docker daemon is only supported on linux")
+	}
+	if os.Geteuid() != 0 {
+		log.Fatalf("The Docker daemon needs to be run as root")
+	}
+	if err := checkKernelAndArch(); err != nil {
+		log.Fatalf(err.Error())
+	}
+
+	// set up the TempDir to use a canonical path
+	tmp, err := utils.TempDir(config.Root)
+	if err != nil {
+		log.Fatalf("Unable to get the TempDir under %s: %s", config.Root, err)
+	}
+	realTmp, err := utils.ReadSymlinkedDirectory(tmp)
+	if err != nil {
+		log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
+	}
+	os.Setenv("TMPDIR", realTmp)
+	if !config.EnableSelinuxSupport {
+		selinuxSetDisabled()
+	}
+
+	// get the canonical path to the Docker root directory
+	var realRoot string
+	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
+		realRoot = config.Root
+	} else {
+		realRoot, err = utils.ReadSymlinkedDirectory(config.Root)
+		if err != nil {
+			log.Fatalf("Unable to get the full path to root (%s): %s", config.Root, err)
+		}
+	}
+	config.Root = realRoot
 	// Create the root directory if it doesn't exist
 	if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) {
 		return nil, err
@@ -776,7 +748,12 @@
 	if err != nil {
 		return nil, err
 	}
-	utils.Debugf("Using graph driver %s", driver)
+	log.Debugf("Using graph driver %s", driver)
+
+	// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
+	// As SELinux is currently incompatible with the btrfs graph driver, refuse to start if both are enabled
+		return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver!")
+	}
 
 	daemonRepo := path.Join(config.Root, "containers")
 
@@ -789,7 +766,7 @@
 		return nil, err
 	}
 
-	utils.Debugf("Creating images graph")
+	log.Debugf("Creating images graph")
 	g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
 	if err != nil {
 		return nil, err
@@ -801,12 +778,12 @@
 	if err != nil {
 		return nil, err
 	}
-	utils.Debugf("Creating volumes graph")
+	log.Debugf("Creating volumes graph")
 	volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
 	if err != nil {
 		return nil, err
 	}
-	utils.Debugf("Creating repository list")
+	log.Debugf("Creating repository list")
 	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g)
 	if err != nil {
 		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
@@ -873,34 +850,52 @@
 		sysInitPath:    sysInitPath,
 		execDriver:     ed,
 		eng:            eng,
-		Sockets:        config.Sockets,
 	}
-
 	if err := daemon.checkLocaldns(); err != nil {
 		return nil, err
 	}
 	if err := daemon.restore(); err != nil {
 		return nil, err
 	}
+	// Setup shutdown handlers
+	// FIXME: can these shutdown handlers be registered closer to their source?
+	eng.OnShutdown(func() {
+		// FIXME: if these cleanup steps can be called concurrently, register
+		// them as separate handlers to speed up total shutdown time
+		// FIXME: use engine logging instead of log.Errorf
+		if err := daemon.shutdown(); err != nil {
+			log.Errorf("daemon.shutdown(): %s", err)
+		}
+		if err := portallocator.ReleaseAll(); err != nil {
+			log.Errorf("portallocator.ReleaseAll(): %s", err)
+		}
+		if err := daemon.driver.Cleanup(); err != nil {
+			log.Errorf("daemon.driver.Cleanup(): %s", err.Error())
+		}
+		if err := daemon.containerGraph.Close(); err != nil {
+			log.Errorf("daemon.containerGraph.Close(): %s", err.Error())
+		}
+	})
+
 	return daemon, nil
 }
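
The FIXME above suggests one OnShutdown handler per cleanup step; since OnShutdown is already registered twice in this function (once for the pidfile), splitting would look like this sketch:

	eng.OnShutdown(func() {
		if err := portallocator.ReleaseAll(); err != nil {
			log.Errorf("portallocator.ReleaseAll(): %s", err)
		}
	})
	eng.OnShutdown(func() {
		if err := daemon.driver.Cleanup(); err != nil {
			log.Errorf("daemon.driver.Cleanup(): %s", err)
		}
	})
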
 
 func (daemon *Daemon) shutdown() error {
 	group := sync.WaitGroup{}
-	utils.Debugf("starting clean shutdown of all containers...")
+	log.Debugf("starting clean shutdown of all containers...")
 	for _, container := range daemon.List() {
 		c := container
 		if c.State.IsRunning() {
-			utils.Debugf("stopping %s", c.ID)
+			log.Debugf("stopping %s", c.ID)
 			group.Add(1)
 
 			go func() {
 				defer group.Done()
 				if err := c.KillSig(15); err != nil {
-					utils.Debugf("kill 15 error for %s - %s", c.ID, err)
+					log.Debugf("kill 15 error for %s - %s", c.ID, err)
 				}
 				c.State.WaitStop(-1 * time.Second)
-				utils.Debugf("container stopped %s", c.ID)
+				log.Debugf("container stopped %s", c.ID)
 			}()
 		}
 	}
@@ -909,30 +904,6 @@
 	return nil
 }
 
-func (daemon *Daemon) Close() error {
-	errorsStrings := []string{}
-	if err := daemon.shutdown(); err != nil {
-		utils.Errorf("daemon.shutdown(): %s", err)
-		errorsStrings = append(errorsStrings, err.Error())
-	}
-	if err := portallocator.ReleaseAll(); err != nil {
-		utils.Errorf("portallocator.ReleaseAll(): %s", err)
-		errorsStrings = append(errorsStrings, err.Error())
-	}
-	if err := daemon.driver.Cleanup(); err != nil {
-		utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
-		errorsStrings = append(errorsStrings, err.Error())
-	}
-	if err := daemon.containerGraph.Close(); err != nil {
-		utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
-		errorsStrings = append(errorsStrings, err.Error())
-	}
-	if len(errorsStrings) > 0 {
-		return fmt.Errorf("%s", strings.Join(errorsStrings, ", "))
-	}
-	return nil
-}
-
 func (daemon *Daemon) Mount(container *Container) error {
 	dir, err := daemon.driver.Get(container.ID, container.GetMountLabel())
 	if err != nil {
@@ -1023,6 +994,8 @@
 // from the content root, including images, volumes and
 // container filesystems.
 // Again: this will remove your entire docker daemon!
+// FIXME: this is deprecated, and only used in legacy
+// tests. Please remove.
 func (daemon *Daemon) Nuke() error {
 	var wg sync.WaitGroup
 	for _, container := range daemon.List() {
@@ -1033,7 +1006,6 @@
 		}(container)
 	}
 	wg.Wait()
-	daemon.Close()
 
 	return os.RemoveAll(daemon.config.Root)
 }
@@ -1050,7 +1022,7 @@
 	return daemon.repositories
 }
 
-func (daemon *Daemon) Config() *daemonconfig.Config {
+func (daemon *Daemon) Config() *Config {
 	return daemon.config
 }
 
@@ -1078,18 +1050,70 @@
 	return daemon.containerGraph
 }
 
-func (daemon *Daemon) SetServer(server Server) {
-	daemon.srv = server
-}
-
 func (daemon *Daemon) checkLocaldns() error {
 	resolvConf, err := resolvconf.Get()
 	if err != nil {
 		return err
 	}
 	if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
-		log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
+		log.Infof("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v", DefaultDns)
 		daemon.config.Dns = DefaultDns
 	}
 	return nil
 }
+
+func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
+	// Retrieve all images
+	images, err := daemon.Graph().Map()
+	if err != nil {
+		return nil, err
+	}
+
+	// Store the tree in a map of map (map[parentId][childId])
+	imageMap := make(map[string]map[string]struct{})
+	for _, img := range images {
+		if _, exists := imageMap[img.Parent]; !exists {
+			imageMap[img.Parent] = make(map[string]struct{})
+		}
+		imageMap[img.Parent][img.ID] = struct{}{}
+	}
+
+	// Loop on the children of the given image and check the config
+	var match *image.Image
+	for elem := range imageMap[imgID] {
+		img, err := daemon.Graph().Get(elem)
+		if err != nil {
+			return nil, err
+		}
+		if runconfig.Compare(&img.ContainerConfig, config) {
+			if match == nil || match.Created.Before(img.Created) {
+				match = img
+			}
+		}
+	}
+	return match, nil
+}
+
+func checkKernelAndArch() error {
+	// Check for unsupported architectures
+	if runtime.GOARCH != "amd64" {
+		return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
+	}
+	// Check for unsupported kernel versions
+	// FIXME: it would be cleaner to not test for specific versions, but rather
+	// test for specific functionalities.
+	// Unfortunately we can't test for the feature "does not cause a kernel panic"
+	// without actually causing a kernel panic, so we need this workaround until
+	// the circumstances of pre-3.8 crashes are clearer.
+	// For details see http://github.com/docker/docker/issues/407
+	if k, err := kernel.GetKernelVersion(); err != nil {
+		log.Infof("WARNING: %s", err)
+	} else {
+		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
+			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
+				log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+			}
+		}
+	}
+	return nil
+}
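
Gating any other feature on a minimum kernel would follow the same pattern as checkKernelAndArch above (a sketch reusing only the helpers shown there):

	if k, err := kernel.GetKernelVersion(); err == nil {
		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
			log.Infof("kernel %s predates 3.8.0; expect instability", k.String())
		}
	}
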
diff --git a/daemon/daemon_aufs.go b/daemon/daemon_aufs.go
index ee3e1d1..a370a4c 100644
--- a/daemon/daemon_aufs.go
+++ b/daemon/daemon_aufs.go
@@ -3,17 +3,17 @@
 package daemon
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/daemon/graphdriver/aufs"
-	"github.com/dotcloud/docker/graph"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver/aufs"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/pkg/log"
 )
 
 // Given the graphdriver ad, if it is aufs, then migrate it.
 // If aufs driver is not built, this func is a noop.
 func migrateIfAufs(driver graphdriver.Driver, root string) error {
 	if ad, ok := driver.(*aufs.Driver); ok {
-		utils.Debugf("Migrating existing containers")
+		log.Debugf("Migrating existing containers")
 		if err := ad.Migrate(root, graph.SetupInitLayer); err != nil {
 			return err
 		}
diff --git a/daemon/daemon_btrfs.go b/daemon/daemon_btrfs.go
index f343d69..cd505c3 100644
--- a/daemon/daemon_btrfs.go
+++ b/daemon/daemon_btrfs.go
@@ -3,5 +3,5 @@
 package daemon
 
 import (
-	_ "github.com/dotcloud/docker/daemon/graphdriver/btrfs"
+	_ "github.com/docker/docker/daemon/graphdriver/btrfs"
 )
diff --git a/daemon/daemon_devicemapper.go b/daemon/daemon_devicemapper.go
index ddf8107..4777545 100644
--- a/daemon/daemon_devicemapper.go
+++ b/daemon/daemon_devicemapper.go
@@ -3,5 +3,5 @@
 package daemon
 
 import (
-	_ "github.com/dotcloud/docker/daemon/graphdriver/devmapper"
+	_ "github.com/docker/docker/daemon/graphdriver/devmapper"
 )
diff --git a/daemon/daemon_no_aufs.go b/daemon/daemon_no_aufs.go
index 2d9fed2..06cdc77 100644
--- a/daemon/daemon_no_aufs.go
+++ b/daemon/daemon_no_aufs.go
@@ -3,7 +3,7 @@
 package daemon
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver"
 )
 
 func migrateIfAufs(driver graphdriver.Driver, root string) error {
diff --git a/daemon/delete.go b/daemon/delete.go
new file mode 100644
index 0000000..501aed3
--- /dev/null
+++ b/daemon/delete.go
@@ -0,0 +1,174 @@
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+)
+
+// FIXME: rename to ContainerRemove for consistency with the CLI command.
+func (daemon *Daemon) ContainerDestroy(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+	}
+	name := job.Args[0]
+	removeVolume := job.GetenvBool("removeVolume")
+	removeLink := job.GetenvBool("removeLink")
+	forceRemove := job.GetenvBool("forceRemove")
+	container := daemon.Get(name)
+
+	if removeLink {
+		if container == nil {
+			return job.Errorf("No such link: %s", name)
+		}
+		name, err := GetFullContainerName(name)
+		if err != nil {
+			return job.Error(err)
+		}
+		parent, n := path.Split(name)
+		if parent == "/" {
+			return job.Errorf("Conflict, cannot remove the default name of the container")
+		}
+		pe := daemon.ContainerGraph().Get(parent)
+		if pe == nil {
+			return job.Errorf("Cannot get parent %s for name %s", parent, name)
+		}
+		parentContainer := daemon.Get(pe.ID())
+
+		if parentContainer != nil {
+			parentContainer.DisableLink(n)
+		}
+
+		if err := daemon.ContainerGraph().Delete(name); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+
+	if container != nil {
+		if container.State.IsRunning() {
+			if forceRemove {
+				if err := container.Kill(); err != nil {
+					return job.Errorf("Could not kill running container, cannot remove - %v", err)
+				}
+			} else {
+				return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f")
+			}
+		}
+		if err := daemon.Destroy(container); err != nil {
+			return job.Errorf("Cannot destroy container %s: %s", name, err)
+		}
+		container.LogEvent("destroy")
+
+		if removeVolume {
+			var (
+				volumes     = make(map[string]struct{})
+				binds       = make(map[string]struct{})
+				usedVolumes = make(map[string]*Container)
+			)
+
+			// the volume id is always the base of the path
+			getVolumeId := func(p string) string {
+				return filepath.Base(strings.TrimSuffix(p, "/layer"))
+			}
+
+			// populate bind map so that they can be skipped and not removed
+			for _, bind := range container.HostConfig().Binds {
+				source := strings.Split(bind, ":")[0]
+				// TODO: refactor all volume stuff, all of it
+				// it is very important that we eval the link, or comparing the keys to container.Volumes will not work
+				//
+				// eval symlink can fail (ref #5244); if we receive an "is not exist" error we can ignore it
+				p, err := filepath.EvalSymlinks(source)
+				if err != nil && !os.IsNotExist(err) {
+					return job.Error(err)
+				}
+				if p != "" {
+					source = p
+				}
+				binds[source] = struct{}{}
+			}
+
+			// Store all the deleted containers volumes
+			for _, volumeId := range container.Volumes {
+				// Skip volumes that come from external bind mounts; the bind
+				// sources were already evaluated for symlinks above
+				if _, exists := binds[volumeId]; exists {
+					continue
+				}
+
+				volumeId = getVolumeId(volumeId)
+				volumes[volumeId] = struct{}{}
+			}
+
+			// Retrieve all volumes from all remaining containers
+			for _, container := range daemon.List() {
+				for _, containerVolumeId := range container.Volumes {
+					containerVolumeId = getVolumeId(containerVolumeId)
+					usedVolumes[containerVolumeId] = container
+				}
+			}
+
+			for volumeId := range volumes {
+				// If the requested volume is still in use by another container, skip it
+				if c, exists := usedVolumes[volumeId]; exists {
+					log.Infof("The volume %s is used by the container %s. It cannot be removed. Skipping.", volumeId, c.ID)
+					continue
+				}
+				if err := daemon.Volumes().Delete(volumeId); err != nil {
+					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
+				}
+			}
+		}
+	} else {
+		return job.Errorf("No such container: %s", name)
+	}
+	return engine.StatusOK
+}
+
+// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
+// FIXME: rename to Rm for consistency with the CLI command
+func (daemon *Daemon) Destroy(container *Container) error {
+	if container == nil {
+		return fmt.Errorf("The given container is <nil>")
+	}
+
+	element := daemon.containers.Get(container.ID)
+	if element == nil {
+		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
+	}
+
+	if err := container.Stop(3); err != nil {
+		return err
+	}
+
+	// Deregister the container before removing its directory, to avoid race conditions
+	daemon.idIndex.Delete(container.ID)
+	daemon.containers.Delete(container.ID)
+
+	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
+		log.Debugf("Unable to remove container from link graph: %s", err)
+	}
+
+	if err := daemon.driver.Remove(container.ID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err)
+	}
+
+	initID := fmt.Sprintf("%s-init", container.ID)
+	if err := daemon.driver.Remove(initID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err)
+	}
+
+	if err := os.RemoveAll(container.root); err != nil {
+		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
+	}
+
+	selinuxFreeLxcContexts(container.ProcessLabel)
+
+	return nil
+}
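
A worked example of the getVolumeId closure used in ContainerDestroy above (the on-disk path layout shown is an assumption):

	p := "/var/lib/docker/vfs/dir/0123abcd/layer"
	id := filepath.Base(strings.TrimSuffix(p, "/layer")) // "0123abcd"

For a path without the "/layer" suffix, TrimSuffix is a no-op and Base alone yields the id.
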
diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go
index a3d3bc2..121c6a5 100644
--- a/daemon/execdriver/driver.go
+++ b/daemon/execdriver/driver.go
@@ -20,47 +20,7 @@
 	ErrDriverNotFound          = errors.New("The requested docker init has not been found")
 )
 
-var dockerInitFcts map[string]InitFunc
-
-type (
-	StartCallback func(*Command)
-	InitFunc      func(i *InitArgs) error
-)
-
-func RegisterInitFunc(name string, fct InitFunc) error {
-	if dockerInitFcts == nil {
-		dockerInitFcts = make(map[string]InitFunc)
-	}
-	if _, ok := dockerInitFcts[name]; ok {
-		return ErrDriverAlreadyRegistered
-	}
-	dockerInitFcts[name] = fct
-	return nil
-}
-
-func GetInitFunc(name string) (InitFunc, error) {
-	fct, ok := dockerInitFcts[name]
-	if !ok {
-		return nil, ErrDriverNotFound
-	}
-	return fct, nil
-}
-
-// Args provided to the init function for a driver
-type InitArgs struct {
-	User       string
-	Gateway    string
-	Ip         string
-	WorkDir    string
-	Privileged bool
-	Env        []string
-	Args       []string
-	Mtu        int
-	Driver     string
-	Console    string
-	Pipe       int
-	Root       string
-}
+type StartCallback func(*Command)
 
 // Driver specific information based on
 // processes registered with the driver
@@ -140,6 +100,8 @@
 	Mounts             []Mount             `json:"mounts"`
 	AllowedDevices     []*devices.Device   `json:"allowed_devices"`
 	AutoCreatedDevices []*devices.Device   `json:"autocreated_devices"`
+	CapAdd             []string            `json:"cap_add"`
+	CapDrop            []string            `json:"cap_drop"`
 
 	Terminal     Terminal `json:"-"`             // standard or tty terminal
 	Console      string   `json:"-"`             // dev/console path
diff --git a/daemon/execdriver/execdrivers/execdrivers.go b/daemon/execdriver/execdrivers/execdrivers.go
index 2e18454..2a050b4 100644
--- a/daemon/execdriver/execdrivers/execdrivers.go
+++ b/daemon/execdriver/execdrivers/execdrivers.go
@@ -2,10 +2,10 @@
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/execdriver/lxc"
-	"github.com/dotcloud/docker/daemon/execdriver/native"
-	"github.com/dotcloud/docker/pkg/sysinfo"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/lxc"
+	"github.com/docker/docker/daemon/execdriver/native"
+	"github.com/docker/docker/pkg/sysinfo"
 	"path"
 )
 
@@ -15,7 +15,7 @@
 		// we want to give the lxc driver the full docker root because it needs
 		// to access and write config and template files in /var/lib/docker/containers/*
 		// to be backwards compatible
-		return lxc.NewDriver(root, sysInfo.AppArmor)
+		return lxc.NewDriver(root, initPath, sysInfo.AppArmor)
 	case "native":
 		return native.NewDriver(path.Join(root, "execdriver", "native"), initPath)
 	}
diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go
index 59daf1a..3b87017 100644
--- a/daemon/execdriver/lxc/driver.go
+++ b/daemon/execdriver/lxc/driver.go
@@ -3,69 +3,47 @@
 import (
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
-	"log"
 	"os"
 	"os/exec"
 	"path"
 	"path/filepath"
-	"runtime"
 	"strconv"
 	"strings"
 	"syscall"
 	"time"
 
+	"github.com/kr/pty"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/label"
 	"github.com/docker/libcontainer/mount/nodes"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/utils"
 )
 
 const DriverName = "lxc"
 
-func init() {
-	execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
-		runtime.LockOSThread()
-		if err := setupEnv(args); err != nil {
-			return err
-		}
-		if err := setupHostname(args); err != nil {
-			return err
-		}
-		if err := setupNetworking(args); err != nil {
-			return err
-		}
-		if err := finalizeNamespace(args); err != nil {
-			return err
-		}
-
-		path, err := exec.LookPath(args.Args[0])
-		if err != nil {
-			log.Printf("Unable to locate %v", args.Args[0])
-			os.Exit(127)
-		}
-		if err := syscall.Exec(path, args.Args, os.Environ()); err != nil {
-			return fmt.Errorf("dockerinit unable to execute %s - %s", path, err)
-		}
-		panic("Unreachable")
-	})
-}
-
 type driver struct {
 	root       string // root path for the driver to use
+	initPath   string
 	apparmor   bool
 	sharedRoot bool
 }
 
-func NewDriver(root string, apparmor bool) (*driver, error) {
+func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
 	// setup unconfined symlink
 	if err := linkLxcStart(root); err != nil {
 		return nil, err
 	}
+
 	return &driver{
 		apparmor:   apparmor,
 		root:       root,
+		initPath:   initPath,
 		sharedRoot: rootIsShared(),
 	}, nil
 }
@@ -76,9 +54,25 @@
 }
 
 func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
-	if err := execdriver.SetTerminal(c, pipes); err != nil {
-		return -1, err
+	var (
+		term execdriver.Terminal
+		err  error
+	)
+
+	if c.Tty {
+		term, err = NewTtyConsole(c, pipes)
+	} else {
+		term, err = execdriver.NewStdConsole(c, pipes)
 	}
+	c.Terminal = term
+
+	c.Mounts = append(c.Mounts, execdriver.Mount{
+		Source:      d.initPath,
+		Destination: c.InitPath,
+		Writable:    false,
+		Private:     true,
+	})
+
 	if err := d.generateEnvConfig(c); err != nil {
 		return -1, err
 	}
@@ -92,8 +86,6 @@
 		"-f", configPath,
 		"--",
 		c.InitPath,
-		"-driver",
-		DriverName,
 	}
 
 	if c.Network.Interface != nil {
@@ -122,6 +114,14 @@
 		params = append(params, "-w", c.WorkingDir)
 	}
 
+	if len(c.CapAdd) > 0 {
+		params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":")))
+	}
+
+	if len(c.CapDrop) > 0 {
+		params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":")))
+	}
+
 	params = append(params, "--", c.Entrypoint)
 	params = append(params, c.Arguments...)
 
@@ -320,7 +320,7 @@
 
 	output, err := i.driver.getInfo(i.ID)
 	if err != nil {
-		utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
+		log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
 		return false
 	}
 	if strings.Contains(string(output), "RUNNING") {
@@ -447,7 +447,83 @@
 		return err
 	}
 	p := path.Join(d.root, "containers", c.ID, "config.env")
-	c.Mounts = append(c.Mounts, execdriver.Mount{p, "/.dockerenv", false, true})
+	c.Mounts = append(c.Mounts, execdriver.Mount{
+		Source:      p,
+		Destination: "/.dockerenv",
+		Writable:    false,
+		Private:     true,
+	})
 
 	return ioutil.WriteFile(p, data, 0600)
 }
+
+type TtyConsole struct {
+	MasterPty *os.File
+	SlavePty  *os.File
+}
+
+func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
+	// lxc is special in that we cannot create the master outside of the container without
+	// opening the slave, because we have nothing to provide to the cmd. We have to open both, then do
+	// the crazy setup on the command right now instead of passing the console path to lxc and telling it
+	// to open up that console. We save a couple of open files in the native driver because we can do
+	// this.
+	ptyMaster, ptySlave, err := pty.Open()
+	if err != nil {
+		return nil, err
+	}
+
+	tty := &TtyConsole{
+		MasterPty: ptyMaster,
+		SlavePty:  ptySlave,
+	}
+
+	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
+		tty.Close()
+		return nil, err
+	}
+
+	command.Console = tty.SlavePty.Name()
+
+	return tty, nil
+}
+
+func (t *TtyConsole) Master() *os.File {
+	return t.MasterPty
+}
+
+func (t *TtyConsole) Resize(h, w int) error {
+	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
+}
+
+func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error {
+	command.Stdout = t.SlavePty
+	command.Stderr = t.SlavePty
+
+	go func() {
+		if wb, ok := pipes.Stdout.(interface {
+			CloseWriters() error
+		}); ok {
+			defer wb.CloseWriters()
+		}
+
+		io.Copy(pipes.Stdout, t.MasterPty)
+	}()
+
+	if pipes.Stdin != nil {
+		command.Stdin = t.SlavePty
+		command.SysProcAttr.Setctty = true
+
+		go func() {
+			io.Copy(t.MasterPty, pipes.Stdin)
+
+			pipes.Stdin.Close()
+		}()
+	}
+	return nil
+}
+
+func (t *TtyConsole) Close() error {
+	t.SlavePty.Close()
+	return t.MasterPty.Close()
+}
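Note on the pty dance above: pty.Open() hands back both ends of a pseudo-terminal, the child gets the slave as its stdio, and the daemon relays the master. A minimal, self-contained sketch of the same pattern (illustrative only, not part of this patch; it assumes nothing beyond github.com/kr/pty, which is already imported above):

    package main

    import (
    	"io"
    	"os"
    	"os/exec"
    	"syscall"

    	"github.com/kr/pty"
    )

    func main() {
    	// Open both ends, as NewTtyConsole does.
    	master, slave, err := pty.Open()
    	if err != nil {
    		panic(err)
    	}
    	defer master.Close()
    	defer slave.Close()

    	cmd := exec.Command("sh", "-c", "tty; echo hello")
    	// The child owns the slave side (cf. TtyConsole.AttachPipes).
    	cmd.Stdin, cmd.Stdout, cmd.Stderr = slave, slave, slave
    	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true, Setctty: true}

    	if err := cmd.Start(); err != nil {
    		panic(err)
    	}
    	// The parent relays the master side, like the io.Copy goroutines above.
    	go io.Copy(os.Stdout, master)
    	cmd.Wait()
    }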
diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go
index 1af7730..2a91bbb 100644
--- a/daemon/execdriver/lxc/init.go
+++ b/daemon/execdriver/lxc/init.go
@@ -2,19 +2,116 @@
 
 import (
 	"encoding/json"
+	"flag"
 	"fmt"
 	"io/ioutil"
+	"log"
 	"net"
 	"os"
+	"os/exec"
+	"runtime"
 	"strings"
 	"syscall"
 
+	"github.com/docker/docker/reexec"
 	"github.com/docker/libcontainer/netlink"
-	"github.com/dotcloud/docker/daemon/execdriver"
 )
 
+// Args provided to the init function for a driver
+type InitArgs struct {
+	User       string
+	Gateway    string
+	Ip         string
+	WorkDir    string
+	Privileged bool
+	Env        []string
+	Args       []string
+	Mtu        int
+	Console    string
+	Pipe       int
+	Root       string
+	CapAdd     string
+	CapDrop    string
+}
+
+func init() {
+	// As always, lxc requires a hack to get this to work
+	reexec.Register("/.dockerinit", dockerInitializer)
+}
+
+func dockerInitializer() {
+	initializer()
+}
+
+// initializer is the lxc driver's init function that is run inside the namespace to setup
+// additional configurations
+func initializer() {
+	runtime.LockOSThread()
+
+	args := getArgs()
+
+	if err := setupNamespace(args); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func setupNamespace(args *InitArgs) error {
+	if err := setupEnv(args); err != nil {
+		return err
+	}
+	if err := setupHostname(args); err != nil {
+		return err
+	}
+	if err := setupNetworking(args); err != nil {
+		return err
+	}
+	if err := finalizeNamespace(args); err != nil {
+		return err
+	}
+
+	path, err := exec.LookPath(args.Args[0])
+	if err != nil {
+		log.Printf("Unable to locate %v", args.Args[0])
+		os.Exit(127)
+	}
+
+	if err := syscall.Exec(path, args.Args, os.Environ()); err != nil {
+		return fmt.Errorf("dockerinit unable to execute %s - %s", path, err)
+	}
+
+	return nil
+}
+
+func getArgs() *InitArgs {
+	var (
+		// Get cmdline arguments
+		user       = flag.String("u", "", "username or uid")
+		gateway    = flag.String("g", "", "gateway address")
+		ip         = flag.String("i", "", "ip address")
+		workDir    = flag.String("w", "", "workdir")
+		privileged = flag.Bool("privileged", false, "privileged mode")
+		mtu        = flag.Int("mtu", 1500, "interface mtu")
+		capAdd     = flag.String("cap-add", "", "capabilities to add")
+		capDrop    = flag.String("cap-drop", "", "capabilities to drop")
+	)
+
+	flag.Parse()
+
+	return &InitArgs{
+		User:       *user,
+		Gateway:    *gateway,
+		Ip:         *ip,
+		WorkDir:    *workDir,
+		Privileged: *privileged,
+		Args:       flag.Args(),
+		Mtu:        *mtu,
+		CapAdd:     *capAdd,
+		CapDrop:    *capDrop,
+	}
+}
+
 // Clear environment pollution introduced by lxc-start
-func setupEnv(args *execdriver.InitArgs) error {
+func setupEnv(args *InitArgs) error {
 	// Get env
 	var env []string
 	content, err := ioutil.ReadFile(".dockerenv")
@@ -41,7 +138,7 @@
 	return nil
 }
 
-func setupHostname(args *execdriver.InitArgs) error {
+func setupHostname(args *InitArgs) error {
 	hostname := getEnv(args, "HOSTNAME")
 	if hostname == "" {
 		return nil
@@ -50,7 +147,7 @@
 }
 
 // Setup networking
-func setupNetworking(args *execdriver.InitArgs) error {
+func setupNetworking(args *InitArgs) error {
 	if args.Ip != "" {
 		// eth0
 		iface, err := net.InterfaceByName("eth0")
@@ -95,7 +192,7 @@
 }
 
 // Setup working directory
-func setupWorkingDirectory(args *execdriver.InitArgs) error {
+func setupWorkingDirectory(args *InitArgs) error {
 	if args.WorkDir == "" {
 		return nil
 	}
@@ -105,7 +202,7 @@
 	return nil
 }
 
-func getEnv(args *execdriver.InitArgs, key string) string {
+func getEnv(args *InitArgs, key string) string {
 	for _, kv := range args.Env {
 		parts := strings.SplitN(kv, "=", 2)
 		if parts[0] == key && len(parts) == 2 {
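Taken together with the driver changes above, the flags parsed by getArgs() arrive on the command line that lxc-start hands to /.dockerinit. A representative invocation (addresses, paths, and capability names invented for illustration) looks roughly like:

    lxc-start -f /var/lib/docker/containers/<id>/config.lxc -- \
        /.dockerinit -g 172.17.42.1 -i 172.17.0.7/16 -mtu 1500 \
        -cap-add=NET_ADMIN:SYS_TIME -cap-drop=MKNOD -- /bin/bash

reexec then matches argv[0] ("/.dockerinit") against the name registered in init() and runs the initializer before the final exec of /bin/bash.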
diff --git a/daemon/execdriver/lxc/lxc_init_linux.go b/daemon/execdriver/lxc/lxc_init_linux.go
index 1fd497e..625caa1 100644
--- a/daemon/execdriver/lxc/lxc_init_linux.go
+++ b/daemon/execdriver/lxc/lxc_init_linux.go
@@ -1,24 +1,23 @@
-// +build amd64
-
 package lxc
 
 import (
 	"fmt"
+	"strings"
 	"syscall"
 
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer/namespaces"
 	"github.com/docker/libcontainer/security/capabilities"
+	"github.com/docker/libcontainer/system"
 	"github.com/docker/libcontainer/utils"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/execdriver/native/template"
-	"github.com/dotcloud/docker/pkg/system"
 )
 
 func setHostname(hostname string) error {
 	return syscall.Sethostname([]byte(hostname))
 }
 
-func finalizeNamespace(args *execdriver.InitArgs) error {
+func finalizeNamespace(args *InitArgs) error {
 	if err := utils.CloseExecFrom(3); err != nil {
 		return err
 	}
@@ -48,8 +47,25 @@
 			return fmt.Errorf("clear keep caps %s", err)
 		}
 
+		var (
+			adds  []string
+			drops []string
+		)
+
+		if args.CapAdd != "" {
+			adds = strings.Split(args.CapAdd, ":")
+		}
+		if args.CapDrop != "" {
+			drops = strings.Split(args.CapDrop, ":")
+		}
+
+		caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops)
+		if err != nil {
+			return err
+		}
+
 		// drop all other capabilities
-		if err := capabilities.DropCapabilities(container.Capabilities); err != nil {
+		if err := capabilities.DropCapabilities(caps); err != nil {
 			return fmt.Errorf("drop capabilities %s", err)
 		}
 	}
diff --git a/daemon/execdriver/lxc/lxc_init_unsupported.go b/daemon/execdriver/lxc/lxc_init_unsupported.go
index 079446e..b3f2ae6 100644
--- a/daemon/execdriver/lxc/lxc_init_unsupported.go
+++ b/daemon/execdriver/lxc/lxc_init_unsupported.go
@@ -1,8 +1,8 @@
-// +build !linux !amd64
+// +build !linux
 
 package lxc
 
-import "github.com/dotcloud/docker/daemon/execdriver"
+import "github.com/docker/docker/daemon/execdriver"
 
 func setHostname(hostname string) error {
 	panic("Not supported on darwin")
diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go
index 88618f0..229b0a5 100644
--- a/daemon/execdriver/lxc/lxc_template.go
+++ b/daemon/execdriver/lxc/lxc_template.go
@@ -4,8 +4,8 @@
 	"strings"
 	"text/template"
 
+	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/daemon/execdriver"
 )
 
 const LxcTemplate = `
@@ -75,9 +75,9 @@
 
 {{range $value := .Mounts}}
 {{if $value.Writable}}
-lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,rw 0 0
+lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw 0 0
 {{else}}
-lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,ro 0 0
+lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro 0 0
 {{end}}
 {{end}}
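With the switch from bind to rbind above, a generated container config now carries fstab entries of the form (paths hypothetical):

    lxc.mount.entry = /var/lib/docker/containers/<id>/config.env /var/lib/docker/<driver>/<id>/rootfs/.dockerenv none rbind,ro 0 0

rbind makes the bind mount recursive, so any submounts beneath a mounted source directory follow it into the container.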
 
diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go
index a9a67c4..8acda80 100644
--- a/daemon/execdriver/lxc/lxc_template_unit_test.go
+++ b/daemon/execdriver/lxc/lxc_template_unit_test.go
@@ -1,3 +1,5 @@
+// +build linux
+
 package lxc
 
 import (
@@ -11,8 +13,8 @@
 	"testing"
 	"time"
 
+	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/libcontainer/devices"
-	"github.com/dotcloud/docker/daemon/execdriver"
 )
 
 func TestLXCConfig(t *testing.T) {
@@ -35,7 +37,7 @@
 		cpu    = cpuMin + rand.Intn(cpuMax-cpuMin)
 	)
 
-	driver, err := NewDriver(root, false)
+	driver, err := NewDriver(root, "", false)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -71,7 +73,7 @@
 
 	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
 
-	driver, err := NewDriver(root, false)
+	driver, err := NewDriver(root, "", false)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/daemon/execdriver/native/configuration/parse.go b/daemon/execdriver/native/configuration/parse.go
index 8fb1b45..e021fa0 100644
--- a/daemon/execdriver/native/configuration/parse.go
+++ b/daemon/execdriver/native/configuration/parse.go
@@ -7,8 +7,8 @@
 	"strconv"
 	"strings"
 
+	"github.com/docker/docker/pkg/units"
 	"github.com/docker/libcontainer"
-	"github.com/dotcloud/docker/pkg/units"
 )
 
 type Action func(*libcontainer.Config, interface{}, string) error
diff --git a/daemon/execdriver/native/configuration/parse_test.go b/daemon/execdriver/native/configuration/parse_test.go
index 0401d7b..1493d2b 100644
--- a/daemon/execdriver/native/configuration/parse_test.go
+++ b/daemon/execdriver/native/configuration/parse_test.go
@@ -3,8 +3,8 @@
 import (
 	"testing"
 
+	"github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer/security/capabilities"
-	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 )
 
 // Checks whether the expected capability is specified in the capabilities.
diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go
index f28507b..e475a1f 100644
--- a/daemon/execdriver/native/create.go
+++ b/daemon/execdriver/native/create.go
@@ -1,3 +1,5 @@
+// +build linux,cgo
+
 package native
 
 import (
@@ -6,14 +8,14 @@
 	"os/exec"
 	"path/filepath"
 
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/native/configuration"
+	"github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/apparmor"
 	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/mount"
 	"github.com/docker/libcontainer/security/capabilities"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
-	"github.com/dotcloud/docker/daemon/execdriver/native/template"
 )
 
 // createContainer populates and configures the container type with the
@@ -42,6 +44,10 @@
 		if err := d.setPrivileged(container); err != nil {
 			return nil, err
 		}
+	} else {
+		if err := d.setCapabilities(container, c); err != nil {
+			return nil, err
+		}
 	}
 
 	if err := d.setupCgroups(container, c); err != nil {
@@ -136,6 +142,11 @@
 	return nil
 }
 
+func (d *driver) setCapabilities(container *libcontainer.Config, c *execdriver.Command) (err error) {
+	container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop)
+	return err
+}
+
 func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error {
 	if c.Resources != nil {
 		container.Cgroups.CpuShares = c.Resources.CpuShares
diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go
index 9033370..c45188b 100644
--- a/daemon/execdriver/native/driver.go
+++ b/daemon/execdriver/native/driver.go
@@ -1,8 +1,11 @@
+// +build linux,cgo
+
 package native
 
 import (
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"os/exec"
@@ -11,13 +14,15 @@
 	"sync"
 	"syscall"
 
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/term"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/apparmor"
 	"github.com/docker/libcontainer/cgroups/fs"
 	"github.com/docker/libcontainer/cgroups/systemd"
+	consolepkg "github.com/docker/libcontainer/console"
 	"github.com/docker/libcontainer/namespaces"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/pkg/system"
+	"github.com/docker/libcontainer/system"
 )
 
 const (
@@ -25,34 +30,6 @@
 	Version    = "0.2"
 )
 
-func init() {
-	execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
-		var container *libcontainer.Config
-		f, err := os.Open(filepath.Join(args.Root, "container.json"))
-		if err != nil {
-			return err
-		}
-		if err := json.NewDecoder(f).Decode(&container); err != nil {
-			f.Close()
-			return err
-		}
-		f.Close()
-
-		rootfs, err := os.Getwd()
-		if err != nil {
-			return err
-		}
-		syncPipe, err := namespaces.NewSyncPipeFromFd(0, uintptr(args.Pipe))
-		if err != nil {
-			return err
-		}
-		if err := namespaces.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil {
-			return err
-		}
-		return nil
-	})
-}
-
 type activeContainer struct {
 	container *libcontainer.Config
 	cmd       *exec.Cmd
@@ -88,6 +65,19 @@
 	if err != nil {
 		return -1, err
 	}
+
+	var term execdriver.Terminal
+
+	if c.Tty {
+		term, err = NewTtyConsole(c, pipes)
+	} else {
+		term, err = execdriver.NewStdConsole(c, pipes)
+	}
+	if err != nil {
+		return -1, err
+	}
+	c.Terminal = term
+
 	d.Lock()
 	d.activeContainers[c.ID] = &activeContainer{
 		container: container,
@@ -99,6 +89,7 @@
 		dataPath = filepath.Join(d.root, c.ID)
 		args     = append([]string{c.Entrypoint}, c.Arguments...)
 	)
+
 	if err := d.createContainerRoot(c.ID); err != nil {
 		return -1, err
 	}
@@ -108,16 +99,10 @@
 		return -1, err
 	}
 
-	term := getTerminal(c, pipes)
-
-	return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
-		// we need to join the rootfs because namespaces will setup the rootfs and chroot
-		initPath := filepath.Join(c.Rootfs, c.InitPath)
-
+	return namespaces.Exec(container, c.Stdin, c.Stdout, c.Stderr, c.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
 		c.Path = d.initPath
 		c.Args = append([]string{
-			initPath,
-			"-driver", DriverName,
+			DriverName,
 			"-console", console,
 			"-pipe", "3",
 			"-root", filepath.Join(d.root, c.ID),
@@ -125,8 +110,9 @@
 		}, args...)
 
 		// set this to nil so that when we set the clone flags anything else is reset
-		c.SysProcAttr = nil
-		system.SetCloneFlags(&c.Cmd, uintptr(namespaces.GetNamespaceFlags(container.Namespaces)))
+		c.SysProcAttr = &syscall.SysProcAttr{
+			Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
+		}
 		c.ExtraFiles = []*os.File{child}
 
 		c.Env = container.Env
@@ -194,11 +180,13 @@
 	if err != nil {
 		return err
 	}
+
 	if state.InitStartTime == currentStartTime {
 		err = syscall.Kill(p.Process.Pid, 9)
 		syscall.Wait4(p.Process.Pid, nil, 0, nil)
 	}
 	d.removeContainerRoot(p.ID)
+
 	return err
 
 }
@@ -260,17 +248,60 @@
 	return ""
 }
 
-func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) namespaces.Terminal {
-	var term namespaces.Terminal
-	if c.Tty {
-		term = &dockerTtyTerm{
-			pipes: pipes,
-		}
-	} else {
-		term = &dockerStdTerm{
-			pipes: pipes,
-		}
+type TtyConsole struct {
+	MasterPty *os.File
+}
+
+func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
+	ptyMaster, console, err := consolepkg.CreateMasterAndConsole()
+	if err != nil {
+		return nil, err
 	}
-	c.Terminal = term
-	return term
+
+	tty := &TtyConsole{
+		MasterPty: ptyMaster,
+	}
+
+	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
+		tty.Close()
+		return nil, err
+	}
+
+	command.Console = console
+
+	return tty, nil
+}
+
+func (t *TtyConsole) Master() *os.File {
+	return t.MasterPty
+}
+
+func (t *TtyConsole) Resize(h, w int) error {
+	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
+}
+
+func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error {
+	go func() {
+		if wb, ok := pipes.Stdout.(interface {
+			CloseWriters() error
+		}); ok {
+			defer wb.CloseWriters()
+		}
+
+		io.Copy(pipes.Stdout, t.MasterPty)
+	}()
+
+	if pipes.Stdin != nil {
+		go func() {
+			io.Copy(t.MasterPty, pipes.Stdin)
+
+			pipes.Stdin.Close()
+		}()
+	}
+
+	return nil
+}
+
+func (t *TtyConsole) Close() error {
+	return t.MasterPty.Close()
 }
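Worth noting: unlike the lxc TtyConsole above, this one holds only the master end. consolepkg.CreateMasterAndConsole returns the master plus the console path, and libcontainer opens that path inside the new namespace itself, which is the saving of "a couple of open files" that the lxc driver's comment alludes to.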
diff --git a/daemon/execdriver/native/driver_unsupported.go b/daemon/execdriver/native/driver_unsupported.go
new file mode 100644
index 0000000..97839cf
--- /dev/null
+++ b/daemon/execdriver/native/driver_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package native
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+	return nil, fmt.Errorf("native driver not supported on non-linux")
+}
diff --git a/daemon/execdriver/native/driver_unsupported_nocgo.go b/daemon/execdriver/native/driver_unsupported_nocgo.go
new file mode 100644
index 0000000..2b8e9f8
--- /dev/null
+++ b/daemon/execdriver/native/driver_unsupported_nocgo.go
@@ -0,0 +1,13 @@
+// +build linux,!cgo
+
+package native
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+	return nil, fmt.Errorf("native driver not supported without cgo")
+}
diff --git a/daemon/execdriver/native/info.go b/daemon/execdriver/native/info.go
index c34d029..601b97e 100644
--- a/daemon/execdriver/native/info.go
+++ b/daemon/execdriver/native/info.go
@@ -1,3 +1,5 @@
+// +build linux,cgo
+
 package native
 
 import (
diff --git a/daemon/execdriver/native/init.go b/daemon/execdriver/native/init.go
new file mode 100644
index 0000000..7021eeb
--- /dev/null
+++ b/daemon/execdriver/native/init.go
@@ -0,0 +1,66 @@
+// +build linux
+
+package native
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/docker/docker/reexec"
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/namespaces"
+	"github.com/docker/libcontainer/syncpipe"
+)
+
+func init() {
+	reexec.Register(DriverName, initializer)
+}
+
+func initializer() {
+	runtime.LockOSThread()
+
+	var (
+		pipe    = flag.Int("pipe", 0, "sync pipe fd")
+		console = flag.String("console", "", "console (pty slave) path")
+		root    = flag.String("root", ".", "root path for configuration files")
+	)
+
+	flag.Parse()
+
+	var container *libcontainer.Config
+	f, err := os.Open(filepath.Join(*root, "container.json"))
+	if err != nil {
+		writeError(err)
+	}
+
+	if err := json.NewDecoder(f).Decode(&container); err != nil {
+		f.Close()
+		writeError(err)
+	}
+	f.Close()
+
+	rootfs, err := os.Getwd()
+	if err != nil {
+		writeError(err)
+	}
+
+	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(*pipe))
+	if err != nil {
+		writeError(err)
+	}
+
+	if err := namespaces.Init(container, rootfs, *console, syncPipe, flag.Args()); err != nil {
+		writeError(err)
+	}
+
+	panic("Unreachable")
+}
+
+func writeError(err error) {
+	fmt.Fprint(os.Stderr, err)
+	os.Exit(1)
+}
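The reexec mechanism both drivers now use is deliberately small: each driver registers an initializer under a well-known argv[0], and the dockerinit binary dispatches on its own name. A hedged sketch of the dispatch side (the real wiring lives in the dockerinit entrypoint, which is outside this hunk):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/docker/docker/reexec"
    )

    func main() {
    	// reexec.Init() compares os.Args[0] against the registered names
    	// ("native" here, "/.dockerinit" for lxc) and, on a match, runs
    	// that initializer and returns true.
    	if reexec.Init() {
    		return
    	}
    	fmt.Fprintln(os.Stderr, "no reexec initializer for", os.Args[0])
    	os.Exit(1)
    }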
diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go
index d0894a0..be3dd5a 100644
--- a/daemon/execdriver/native/template/default_template.go
+++ b/daemon/execdriver/native/template/default_template.go
@@ -12,6 +12,7 @@
 		Capabilities: []string{
 			"CHOWN",
 			"DAC_OVERRIDE",
+			"FSETID",
 			"FOWNER",
 			"MKNOD",
 			"NET_RAW",
@@ -22,6 +23,7 @@
 			"NET_BIND_SERVICE",
 			"SYS_CHROOT",
 			"KILL",
+			"AUDIT_WRITE",
 		},
 		Namespaces: map[string]bool{
 			"NEWNS":  true,
diff --git a/daemon/execdriver/native/term.go b/daemon/execdriver/native/term.go
deleted file mode 100644
index f60351c..0000000
--- a/daemon/execdriver/native/term.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-   These types are wrappers around the libcontainer Terminal interface so that
-   we can resuse the docker implementations where possible.
-*/
-package native
-
-import (
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"io"
-	"os"
-	"os/exec"
-)
-
-type dockerStdTerm struct {
-	execdriver.StdConsole
-	pipes *execdriver.Pipes
-}
-
-func (d *dockerStdTerm) Attach(cmd *exec.Cmd) error {
-	return d.AttachPipes(cmd, d.pipes)
-}
-
-func (d *dockerStdTerm) SetMaster(master *os.File) {
-	// do nothing
-}
-
-type dockerTtyTerm struct {
-	execdriver.TtyConsole
-	pipes *execdriver.Pipes
-}
-
-func (t *dockerTtyTerm) Attach(cmd *exec.Cmd) error {
-	go io.Copy(t.pipes.Stdout, t.MasterPty)
-	if t.pipes.Stdin != nil {
-		go io.Copy(t.MasterPty, t.pipes.Stdin)
-	}
-	return nil
-}
-
-func (t *dockerTtyTerm) SetMaster(master *os.File) {
-	t.MasterPty = master
-}
diff --git a/daemon/execdriver/termconsole.go b/daemon/execdriver/termconsole.go
index af6b88d..dc0e54c 100644
--- a/daemon/execdriver/termconsole.go
+++ b/daemon/execdriver/termconsole.go
@@ -1,90 +1,10 @@
 package execdriver
 
 import (
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/kr/pty"
 	"io"
-	"os"
 	"os/exec"
 )
 
-func SetTerminal(command *Command, pipes *Pipes) error {
-	var (
-		term Terminal
-		err  error
-	)
-	if command.Tty {
-		term, err = NewTtyConsole(command, pipes)
-	} else {
-		term, err = NewStdConsole(command, pipes)
-	}
-	if err != nil {
-		return err
-	}
-	command.Terminal = term
-	return nil
-}
-
-type TtyConsole struct {
-	MasterPty *os.File
-	SlavePty  *os.File
-}
-
-func NewTtyConsole(command *Command, pipes *Pipes) (*TtyConsole, error) {
-	ptyMaster, ptySlave, err := pty.Open()
-	if err != nil {
-		return nil, err
-	}
-	tty := &TtyConsole{
-		MasterPty: ptyMaster,
-		SlavePty:  ptySlave,
-	}
-	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
-		tty.Close()
-		return nil, err
-	}
-	command.Console = tty.SlavePty.Name()
-	return tty, nil
-}
-
-func (t *TtyConsole) Master() *os.File {
-	return t.MasterPty
-}
-
-func (t *TtyConsole) Resize(h, w int) error {
-	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
-}
-
-func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error {
-	command.Stdout = t.SlavePty
-	command.Stderr = t.SlavePty
-
-	go func() {
-		if wb, ok := pipes.Stdout.(interface {
-			CloseWriters() error
-		}); ok {
-			defer wb.CloseWriters()
-		}
-		io.Copy(pipes.Stdout, t.MasterPty)
-	}()
-
-	if pipes.Stdin != nil {
-		command.Stdin = t.SlavePty
-		command.SysProcAttr.Setctty = true
-
-		go func() {
-			defer pipes.Stdin.Close()
-			io.Copy(t.MasterPty, pipes.Stdin)
-		}()
-	}
-	return nil
-}
-
-func (t *TtyConsole) Close() error {
-	t.SlavePty.Close()
-	return t.MasterPty.Close()
-}
-
 type StdConsole struct {
 }
 
diff --git a/daemon/execdriver/utils.go b/daemon/execdriver/utils.go
new file mode 100644
index 0000000..37042ef
--- /dev/null
+++ b/daemon/execdriver/utils.go
@@ -0,0 +1,63 @@
+package execdriver
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer/security/capabilities"
+)
+
+func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
+	var (
+		newCaps []string
+		allCaps = capabilities.GetAllCapabilities()
+	)
+
+	// look for invalid cap in the drop list
+	for _, cap := range drops {
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+		if !utils.StringsContainsNoCase(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability drop: %q", cap)
+		}
+	}
+
+	// handle --cap-add=all
+	if utils.StringsContainsNoCase(adds, "all") {
+		basics = capabilities.GetAllCapabilities()
+	}
+
+	if !utils.StringsContainsNoCase(drops, "all") {
+		for _, cap := range basics {
+			// skip `all`, already handled above
+			if strings.ToLower(cap) == "all" {
+				continue
+			}
+
+			// if we don't drop `all`, add back all the non-dropped caps
+			if !utils.StringsContainsNoCase(drops, cap) {
+				newCaps = append(newCaps, strings.ToUpper(cap))
+			}
+		}
+	}
+
+	for _, cap := range adds {
+		// skip `all`, already handled above
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		if !utils.StringsContainsNoCase(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability to add: %q", cap)
+		}
+
+		// add cap if not already in the list
+		if !utils.StringsContainsNoCase(newCaps, cap) {
+			newCaps = append(newCaps, strings.ToUpper(cap))
+		}
+	}
+
+	return newCaps, nil
+}
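A sketch of how a driver consumes this helper, mirroring setCapabilities in native/create.go above (values illustrative):

    package execdriver

    func exampleTweak() ([]string, error) {
    	basics := []string{"CHOWN", "MKNOD", "NET_RAW"} // e.g. template defaults
    	adds := []string{"SYS_TIME"}                    // from --cap-add
    	drops := []string{"MKNOD"}                      // from --cap-drop

    	// Yields ["CHOWN", "NET_RAW", "SYS_TIME"]: MKNOD is dropped from
    	// the basics, SYS_TIME is validated against the full capability
    	// list and appended.
    	return TweakCapabilities(basics, adds, drops)
    }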
diff --git a/daemon/export.go b/daemon/export.go
new file mode 100644
index 0000000..bc0f14a
--- /dev/null
+++ b/daemon/export.go
@@ -0,0 +1,30 @@
+package daemon
+
+import (
+	"io"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s container_id", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		data, err := container.Export()
+		if err != nil {
+			return job.Errorf("%s: %s", name, err)
+		}
+		defer data.Close()
+
+		// Stream the entire contents of the container (basically a volatile snapshot)
+		if _, err := io.Copy(job.Stdout, data); err != nil {
+			return job.Errorf("%s: %s", name, err)
+		}
+		// FIXME: factor job-specific LogEvent to engine.Job.Run()
+		container.LogEvent("export")
+		return engine.StatusOK
+	}
+	return job.Errorf("No such container: %s", name)
+}
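For context, a new daemon file like this is exposed as an engine job elsewhere (the daemon's install code, not part of this patch); the registration and a client-side use amount to something like the following sketch:

    // hedged sketch; the handler name matches the method above
    if err := eng.Register("export", daemon.ContainerExport); err != nil {
    	return err
    }

    // later, stream a container's filesystem to a writer:
    job := eng.Job("export", containerID)
    job.Stdout.Add(tarball)
    if err := job.Run(); err != nil {
    	return err
    }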
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
index 0206b92..ebd4929 100644
--- a/daemon/graphdriver/aufs/aufs.go
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -30,11 +30,12 @@
 	"sync"
 	"syscall"
 
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/log"
+	mountpk "github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	mountpk "github.com/dotcloud/docker/pkg/mount"
-	"github.com/dotcloud/docker/utils"
 )
 
 var (
@@ -209,7 +210,7 @@
 	defer a.Unlock()
 
 	if a.active[id] != 0 {
-		utils.Errorf("Warning: removing active id %s\n", id)
+		log.Errorf("Warning: removing active id %s", id)
 	}
 
 	// Make sure the dir is umounted first
@@ -378,7 +379,7 @@
 
 	for _, id := range ids {
 		if err := a.unmount(id); err != nil {
-			utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
+			log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
 		}
 	}
 
diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go
index b3bad41..081fb88 100644
--- a/daemon/graphdriver/aufs/aufs_test.go
+++ b/daemon/graphdriver/aufs/aufs_test.go
@@ -4,8 +4,8 @@
 	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/graphdriver"
 	"io/ioutil"
 	"os"
 	"path"
diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go
index 1f1d98f..fa74e05 100644
--- a/daemon/graphdriver/aufs/mount.go
+++ b/daemon/graphdriver/aufs/mount.go
@@ -1,14 +1,15 @@
 package aufs
 
 import (
-	"github.com/dotcloud/docker/utils"
 	"os/exec"
 	"syscall"
+
+	"github.com/docker/docker/pkg/log"
 )
 
 func Unmount(target string) error {
 	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
-		utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
+		log.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
 	}
 	if err := syscall.Unmount(target, 0); err != nil {
 		return err
diff --git a/daemon/graphdriver/aufs/mount_linux.go b/daemon/graphdriver/aufs/mount_linux.go
index 6082d9f..c86f1bb 100644
--- a/daemon/graphdriver/aufs/mount_linux.go
+++ b/daemon/graphdriver/aufs/mount_linux.go
@@ -1,5 +1,3 @@
-// +build amd64
-
 package aufs
 
 import "syscall"
diff --git a/daemon/graphdriver/aufs/mount_unsupported.go b/daemon/graphdriver/aufs/mount_unsupported.go
index 2735624..e291bef 100644
--- a/daemon/graphdriver/aufs/mount_unsupported.go
+++ b/daemon/graphdriver/aufs/mount_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux !amd64
+// +build !linux
 
 package aufs
 
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
index f561244..c491fd7 100644
--- a/daemon/graphdriver/btrfs/btrfs.go
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package btrfs
 
@@ -16,8 +16,8 @@
 	"syscall"
 	"unsafe"
 
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/pkg/mount"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/mount"
 )
 
 func init() {
diff --git a/daemon/graphdriver/btrfs/btrfs_test.go b/daemon/graphdriver/btrfs/btrfs_test.go
index 3069a98..cde23ce 100644
--- a/daemon/graphdriver/btrfs/btrfs_test.go
+++ b/daemon/graphdriver/btrfs/btrfs_test.go
@@ -1,7 +1,7 @@
 package btrfs
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver/graphtest"
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
 	"testing"
 )
 
diff --git a/daemon/graphdriver/btrfs/dummy_unsupported.go b/daemon/graphdriver/btrfs/dummy_unsupported.go
index 6c44615..f070888 100644
--- a/daemon/graphdriver/btrfs/dummy_unsupported.go
+++ b/daemon/graphdriver/btrfs/dummy_unsupported.go
@@ -1,3 +1,3 @@
-// +build !linux !amd64
+// +build !linux !cgo
 
 package btrfs
diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md
index c8ab1d1..c426202 100644
--- a/daemon/graphdriver/devmapper/README.md
+++ b/daemon/graphdriver/devmapper/README.md
@@ -7,7 +7,7 @@
 graph location (typically `/var/lib/docker/devicemapper`, $graph below)
 a thin pool is created based on two block devices, one for data and
 one for metadata.  By default these block devices are created
-automatically by using loopback mounts of automatically creates sparse
+automatically by using loopback mounts of automatically created sparse
 files.
 
 The default loopback files used are `$graph/devicemapper/data` and
@@ -15,15 +15,15 @@
 from docker entities to the corresponding devicemapper volumes is
 stored in the `$graph/devicemapper/json` file (encoded as Json).
 
-In order to support multiple devicemapper graphs on a system the thin
+In order to support multiple devicemapper graphs on a system, the thin
 pool will be named something like: `docker-0:33-19478248-pool`, where
-the `0:30` part is the minor/major device nr and `19478248` is the
+the `0:33` part is the major/minor device nr and `19478248` is the
 inode number of the $graph directory.
 
-On the thin pool docker automatically creates a base thin device,
+On the thin pool, docker automatically creates a base thin device,
 called something like `docker-0:33-19478248-base` of a fixed
-size. This is automatically formated on creation and contains just an
-empty filesystem. This device is the base of all docker images and
+size. This is automatically formatted with an empty filesystem on
+creation. This device is the base of all docker images and
 containers. All base images are snapshots of this device and those
 images are then in turn used as snapshots for other images and
 eventually containers.
@@ -31,8 +31,8 @@
 ### options
 
 The devicemapper backend supports some options that you can specify
-when starting the docker daemon using the --storage-opt flags.
-This uses the `dm` prefix and would be used somthing like `docker -d --storage-opt dm.foo=bar`.
+when starting the docker daemon using the `--storage-opt` flags.
+This uses the `dm` prefix and would be used like `docker -d --storage-opt dm.foo=bar`.
 
 Here is the list of supported options:
 
@@ -43,7 +43,11 @@
     10G. Note, thin devices are inherently "sparse", so a 10G device
     which is mostly empty doesn't use 10 GB of space on the
     pool. However, the filesystem will use more space for the empty
-    case the larger the device is.
+    case the larger the device is. **Warning**: This value affects the
+    system-wide "base" empty filesystem that may already be
+    initialized and inherited by pulled images.  Typically, a change
+    to this value will require additional steps to take effect: 1)
+    stop `docker -d`, 2) `rm -rf /var/lib/docker`, 3) start `docker -d`.
 
     Example use:
 
@@ -126,6 +130,15 @@
 
     ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``
 
+ *  `dm.blocksize`
+
+    Specifies a custom blocksize to use for the thin pool.  The default
+    blocksize is 64K.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.blocksize=512K``
+
  *  `dm.blkdiscard`
 
     Enables or disables the use of blkdiscard when removing
diff --git a/daemon/graphdriver/devmapper/attach_loopback.go b/daemon/graphdriver/devmapper/attach_loopback.go
index 28a648a..86714d1 100644
--- a/daemon/graphdriver/devmapper/attach_loopback.go
+++ b/daemon/graphdriver/devmapper/attach_loopback.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
@@ -7,7 +7,7 @@
 	"os"
 	"syscall"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/pkg/log"
 )
 
 func stringToLoopName(src string) [LoNameSize]uint8 {
@@ -39,20 +39,20 @@
 		fi, err := os.Stat(target)
 		if err != nil {
 			if os.IsNotExist(err) {
-				utils.Errorf("There are no more loopback devices available.")
+				log.Errorf("There are no more loopback devices available.")
 			}
 			return nil, ErrAttachLoopbackDevice
 		}
 
 		if fi.Mode()&os.ModeDevice != os.ModeDevice {
-			utils.Errorf("Loopback device %s is not a block device.", target)
+			log.Errorf("Loopback device %s is not a block device.", target)
 			continue
 		}
 
 		// OpenFile adds O_CLOEXEC
 		loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
 		if err != nil {
-			utils.Errorf("Error openning loopback device: %s", err)
+			log.Errorf("Error openning loopback device: %s", err)
 			return nil, ErrAttachLoopbackDevice
 		}
 
@@ -62,7 +62,7 @@
 
 			// If the error is EBUSY, then try the next loopback
 			if err != syscall.EBUSY {
-				utils.Errorf("Cannot set up loopback device %s: %s", target, err)
+				log.Errorf("Cannot set up loopback device %s: %s", target, err)
 				return nil, ErrAttachLoopbackDevice
 			}
 
@@ -75,7 +75,7 @@
 
 	// This can't happen, but let's be sure
 	if loopFile == nil {
-		utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
+		log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
 		return nil, ErrAttachLoopbackDevice
 	}
 
@@ -91,13 +91,13 @@
 	// loopback from index 0.
 	startIndex, err := getNextFreeLoopbackIndex()
 	if err != nil {
-		utils.Debugf("Error retrieving the next available loopback: %s", err)
+		log.Debugf("Error retrieving the next available loopback: %s", err)
 	}
 
 	// OpenFile adds O_CLOEXEC
 	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
 	if err != nil {
-		utils.Errorf("Error openning sparse file %s: %s", sparseName, err)
+		log.Errorf("Error openning sparse file %s: %s", sparseName, err)
 		return nil, ErrAttachLoopbackDevice
 	}
 	defer sparseFile.Close()
@@ -115,11 +115,11 @@
 	}
 
 	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
-		utils.Errorf("Cannot set up loopback device info: %s", err)
+		log.Errorf("Cannot set up loopback device info: %s", err)
 
 		// If the call failed, then free the loopback device
 		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
-			utils.Errorf("Error while cleaning up the loopback device")
+			log.Errorf("Error while cleaning up the loopback device")
 		}
 		loopFile.Close()
 		return nil, ErrAttachLoopbackDevice
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index 31c3f39..42315c6 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
@@ -18,16 +18,18 @@
 	"syscall"
 	"time"
 
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/units"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/pkg/units"
-	"github.com/dotcloud/docker/utils"
 )
 
 var (
 	DefaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
 	DefaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
 	DefaultBaseFsSize           uint64 = 10 * 1024 * 1024 * 1024
+	DefaultThinpBlockSize       uint32 = 128 // 64K = 128 512b sectors
 )
 
 type DevInfo struct {
@@ -78,6 +80,7 @@
 	dataDevice           string
 	metadataDevice       string
 	doBlkDiscard         bool
+	thinpBlockSize       uint32
 }
 
 type DiskUsage struct {
@@ -171,7 +174,7 @@
 		if !os.IsNotExist(err) {
 			return "", err
 		}
-		utils.Debugf("Creating loopback file %s for device-manage use", filename)
+		log.Debugf("Creating loopback file %s for device-manage use", filename)
 		file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
 		if err != nil {
 			return "", err
@@ -249,7 +252,7 @@
 }
 
 func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) {
-	utils.Debugf("registerDevice(%v, %v)", id, hash)
+	log.Debugf("registerDevice(%v, %v)", id, hash)
 	info := &DevInfo{
 		Hash:          hash,
 		DeviceId:      id,
@@ -275,7 +278,7 @@
 }
 
 func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error {
-	utils.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
+	log.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
 
 	if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
 		return nil
@@ -382,13 +385,13 @@
 	}
 
 	if oldInfo != nil && !oldInfo.Initialized {
-		utils.Debugf("Removing uninitialized base image")
+		log.Debugf("Removing uninitialized base image")
 		if err := devices.deleteDevice(oldInfo); err != nil {
 			return err
 		}
 	}
 
-	utils.Debugf("Initializing base device-manager snapshot")
+	log.Debugf("Initializing base device-manager snapshot")
 
 	id := devices.nextDeviceId
 
@@ -400,14 +403,14 @@
 	// Ids are 24bit, so wrap around
 	devices.nextDeviceId = (id + 1) & 0xffffff
 
-	utils.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
+	log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
 	info, err := devices.registerDevice(id, "", devices.baseFsSize)
 	if err != nil {
 		_ = deleteDevice(devices.getPoolDevName(), id)
 		return err
 	}
 
-	utils.Debugf("Creating filesystem on base device-manager snapshot")
+	log.Debugf("Creating filesystem on base device-manager snapshot")
 
 	if err = devices.activateDeviceIfNeeded(info); err != nil {
 		return err
@@ -445,7 +448,7 @@
 		return // Ignore _LOG_DEBUG
 	}
 
-	utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+	log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
 }
 
 func major(device uint64) uint64 {
@@ -510,7 +513,7 @@
 	}
 
 	// Reload with the new block sizes
-	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil {
+	if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil {
 		return fmt.Errorf("Unable to reload pool: %s", err)
 	}
 
@@ -549,13 +552,13 @@
 	//	- The target of this device is at major <maj> and minor <min>
 	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
 	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
-	utils.Debugf("Generated prefix: %s", devices.devicePrefix)
+	log.Debugf("Generated prefix: %s", devices.devicePrefix)
 
 	// Check for the existence of the device <prefix>-pool
-	utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
+	log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
 	info, err := getInfo(devices.getPoolName())
 	if info == nil {
-		utils.Debugf("Error device getInfo: %s", err)
+		log.Debugf("Error device getInfo: %s", err)
 		return err
 	}
 
@@ -571,7 +574,7 @@
 
 	// If the pool doesn't exist, create it
 	if info.Exists == 0 {
-		utils.Debugf("Pool doesn't exist. Creating it.")
+		log.Debugf("Pool doesn't exist. Creating it.")
 
 		var (
 			dataFile     *os.File
@@ -593,7 +596,7 @@
 
 			data, err := devices.ensureImage("data", devices.dataLoopbackSize)
 			if err != nil {
-				utils.Debugf("Error device ensureImage (data): %s\n", err)
+				log.Debugf("Error device ensureImage (data): %s", err)
 				return err
 			}
 
@@ -624,7 +627,7 @@
 
 			metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
 			if err != nil {
-				utils.Debugf("Error device ensureImage (metadata): %s\n", err)
+				log.Debugf("Error device ensureImage (metadata): %s", err)
 				return err
 			}
 
@@ -640,7 +643,7 @@
 		}
 		defer metadataFile.Close()
 
-		if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil {
+		if err := createPool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
 			return err
 		}
 	}
@@ -656,7 +659,7 @@
 	// Setup the base image
 	if doInit {
 		if err := devices.setupBaseImage(); err != nil {
-			utils.Debugf("Error device setupBaseImage: %s\n", err)
+			log.Debugf("Error device setupBaseImage: %s", err)
 			return err
 		}
 	}
@@ -683,7 +686,7 @@
 	deviceId := devices.nextDeviceId
 
 	if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil {
-		utils.Debugf("Error creating snap device: %s\n", err)
+		log.Debugf("Error creating snap device: %s", err)
 		return err
 	}
 
@@ -692,7 +695,7 @@
 
 	if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil {
 		deleteDevice(devices.getPoolDevName(), deviceId)
-		utils.Debugf("Error registering device: %s\n", err)
+		log.Debugf("Error registering device: %s", err)
 		return err
 	}
 	return nil
@@ -705,7 +708,7 @@
 		// manually
 		if err := devices.activateDeviceIfNeeded(info); err == nil {
 			if err := BlockDeviceDiscard(info.DevName()); err != nil {
-				utils.Debugf("Error discarding block on device: %s (ignoring)\n", err)
+				log.Debugf("Error discarding block on device: %s (ignoring)", err)
 			}
 		}
 	}
@@ -713,13 +716,13 @@
 	devinfo, _ := getInfo(info.Name())
 	if devinfo != nil && devinfo.Exists != 0 {
 		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
-			utils.Debugf("Error removing device: %s\n", err)
+			log.Debugf("Error removing device: %s", err)
 			return err
 		}
 	}
 
 	if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
-		utils.Debugf("Error deleting device: %s\n", err)
+		log.Debugf("Error deleting device: %s", err)
 		return err
 	}
 
@@ -732,7 +735,7 @@
 		devices.devicesLock.Lock()
 		devices.Devices[info.Hash] = info
 		devices.devicesLock.Unlock()
-		utils.Debugf("Error removing meta data: %s\n", err)
+		log.Debugf("Error removing meta data: %s", err)
 		return err
 	}
 
@@ -755,8 +758,8 @@
 }
 
 func (devices *DeviceSet) deactivatePool() error {
-	utils.Debugf("[devmapper] deactivatePool()")
-	defer utils.Debugf("[devmapper] deactivatePool END")
+	log.Debugf("[devmapper] deactivatePool()")
+	defer log.Debugf("[devmapper] deactivatePool END")
 	devname := devices.getPoolDevName()
 	devinfo, err := getInfo(devname)
 	if err != nil {
@@ -770,13 +773,13 @@
 }
 
 func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
-	utils.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
-	defer utils.Debugf("[devmapper] deactivateDevice END")
+	log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
+	defer log.Debugf("[devmapper] deactivateDevice END")
 
 	// Wait for the unmount to be effective,
 	// by watching the value of Info.OpenCount for the device
 	if err := devices.waitClose(info); err != nil {
-		utils.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err)
+		log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err)
 	}
 
 	devinfo, err := getInfo(info.Name())
@@ -826,8 +829,8 @@
 // a) the device registered at <device_set_prefix>-<hash> is removed,
 // or b) the 10 second timeout expires.
 func (devices *DeviceSet) waitRemove(devname string) error {
-	utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
-	defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
+	log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
+	defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
 	i := 0
 	for ; i < 1000; i += 1 {
 		devinfo, err := getInfo(devname)
@@ -837,7 +840,7 @@
 			return nil
 		}
 		if i%100 == 0 {
-			utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
+			log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
 		}
 		if devinfo.Exists == 0 {
 			break
@@ -864,7 +867,7 @@
 			return err
 		}
 		if i%100 == 0 {
-			utils.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
+			log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
 		}
 		if devinfo.OpenCount == 0 {
 			break
@@ -881,9 +884,9 @@
 
 func (devices *DeviceSet) Shutdown() error {
 
-	utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
-	utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
-	defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
+	log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
+	log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
+	defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
 
 	var devs []*DevInfo
 
@@ -900,12 +903,12 @@
 			// container. This means it'll go away from the global scope directly,
 			// and the device will be released when that container dies.
 			if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
-				utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
+				log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err)
 			}
 
 			devices.Lock()
 			if err := devices.deactivateDevice(info); err != nil {
-				utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err)
+				log.Debugf("Shutdown deactivate %s , error: %s", info.Hash, err)
 			}
 			devices.Unlock()
 		}
@@ -917,7 +920,7 @@
 		info.lock.Lock()
 		devices.Lock()
 		if err := devices.deactivateDevice(info); err != nil {
-			utils.Debugf("Shutdown deactivate base , error: %s\n", err)
+			log.Debugf("Shutdown deactivate base , error: %s", err)
 		}
 		devices.Unlock()
 		info.lock.Unlock()
@@ -925,7 +928,7 @@
 
 	devices.Lock()
 	if err := devices.deactivatePool(); err != nil {
-		utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
+		log.Debugf("Shutdown deactivate pool , error: %s", err)
 	}
 	devices.Unlock()
 
@@ -989,8 +992,8 @@
 }
 
 func (devices *DeviceSet) UnmountDevice(hash string) error {
-	utils.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
-	defer utils.Debugf("[devmapper] UnmountDevice END")
+	log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
+	defer log.Debugf("[devmapper] UnmountDevice END")
 
 	info, err := devices.lookupDevice(hash)
 	if err != nil {
@@ -1012,11 +1015,11 @@
 		return nil
 	}
 
-	utils.Debugf("[devmapper] Unmount(%s)", info.mountPath)
+	log.Debugf("[devmapper] Unmount(%s)", info.mountPath)
 	if err := syscall.Unmount(info.mountPath, 0); err != nil {
 		return err
 	}
-	utils.Debugf("[devmapper] Unmount done")
+	log.Debugf("[devmapper] Unmount done")
 
 	if err := devices.deactivateDevice(info); err != nil {
 		return err
@@ -1159,30 +1162,31 @@
 		baseFsSize:           DefaultBaseFsSize,
 		filesystem:           "ext4",
 		doBlkDiscard:         true,
+		thinpBlockSize:       DefaultThinpBlockSize,
 	}
 
 	foundBlkDiscard := false
 	for _, option := range options {
-		key, val, err := utils.ParseKeyValueOpt(option)
+		key, val, err := parsers.ParseKeyValueOpt(option)
 		if err != nil {
 			return nil, err
 		}
 		key = strings.ToLower(key)
 		switch key {
 		case "dm.basesize":
-			size, err := units.FromHumanSize(val)
+			size, err := units.RAMInBytes(val)
 			if err != nil {
 				return nil, err
 			}
 			devices.baseFsSize = uint64(size)
 		case "dm.loopdatasize":
-			size, err := units.FromHumanSize(val)
+			size, err := units.RAMInBytes(val)
 			if err != nil {
 				return nil, err
 			}
 			devices.dataLoopbackSize = size
 		case "dm.loopmetadatasize":
-			size, err := units.FromHumanSize(val)
+			size, err := units.RAMInBytes(val)
 			if err != nil {
 				return nil, err
 			}
@@ -1206,6 +1210,13 @@
 			if err != nil {
 				return nil, err
 			}
+		case "dm.blocksize":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			// convert to 512b sectors
+			devices.thinpBlockSize = uint32(size >> 9)
 		default:
 			return nil, fmt.Errorf("Unknown option %s\n", key)
 		}
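The `>> 9` above converts bytes to 512-byte sectors, the unit the thin-pool target expects. Worked through: `dm.blocksize=512K` is 524288 bytes and 524288 >> 9 = 1024 sectors, while the default 64K gives 65536 >> 9 = 128, matching DefaultThinpBlockSize.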
diff --git a/daemon/graphdriver/devmapper/devmapper.go b/daemon/graphdriver/devmapper/devmapper.go
index a6602c2..d09e740 100644
--- a/daemon/graphdriver/devmapper/devmapper.go
+++ b/daemon/graphdriver/devmapper/devmapper.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
@@ -9,7 +9,7 @@
 	"runtime"
 	"syscall"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/pkg/log"
 )
 
 type DevmapperLogger interface {
@@ -198,7 +198,7 @@
 func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
 	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
 	if err != nil {
-		utils.Errorf("Error get loopback backing file: %s\n", err)
+		log.Errorf("Error get loopback backing file: %s", err)
 		return 0, 0, ErrGetLoopbackBackingFile
 	}
 	return loopInfo.loDevice, loopInfo.loInode, nil
@@ -206,7 +206,7 @@
 
 func LoopbackSetCapacity(file *os.File) error {
 	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
-		utils.Errorf("Error loopbackSetCapacity: %s", err)
+		log.Errorf("Error loopbackSetCapacity: %s", err)
 		return ErrLoopbackSetCapacity
 	}
 	return nil
@@ -246,7 +246,7 @@
 
 func UdevWait(cookie uint) error {
 	if res := DmUdevWait(cookie); res != 1 {
-		utils.Debugf("Failed to wait on udev cookie %d", cookie)
+		log.Debugf("Failed to wait on udev cookie %d", cookie)
 		return ErrUdevWait
 	}
 	return nil
@@ -265,7 +265,7 @@
 
 func SetDevDir(dir string) error {
 	if res := DmSetDevDir(dir); res != 1 {
-		utils.Debugf("Error dm_set_dev_dir")
+		log.Debugf("Error dm_set_dev_dir")
 		return ErrSetDevDir
 	}
 	return nil
@@ -286,7 +286,7 @@
 		return ErrCreateRemoveTask
 	}
 	if err := task.SetName(name); err != nil {
-		utils.Debugf("Can't set task name %s", name)
+		log.Debugf("Can't set task name %s", name)
 		return err
 	}
 	if err := task.Run(); err != nil {
@@ -298,7 +298,7 @@
 func GetBlockDeviceSize(file *os.File) (uint64, error) {
 	size, err := ioctlBlkGetSize64(file.Fd())
 	if err != nil {
-		utils.Errorf("Error getblockdevicesize: %s", err)
+		log.Errorf("Error getblockdevicesize: %s", err)
 		return 0, ErrGetBlockSize
 	}
 	return uint64(size), nil
@@ -328,7 +328,7 @@
 }
 
 // This is the programmatic example of "dmsetup create"
-func createPool(poolName string, dataFile, metadataFile *os.File) error {
+func createPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
 	task, err := createTask(DeviceCreate, poolName)
 	if task == nil {
 		return err
@@ -339,7 +339,7 @@
 		return fmt.Errorf("Can't get data size %s", err)
 	}
 
-	params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing"
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
 	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
 		return fmt.Errorf("Can't add target %s", err)
 	}
@@ -358,7 +358,7 @@
 	return nil
 }
 
-func reloadPool(poolName string, dataFile, metadataFile *os.File) error {
+func reloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
 	task, err := createTask(DeviceReload, poolName)
 	if task == nil {
 		return err
@@ -369,7 +369,7 @@
 		return fmt.Errorf("Can't get data size %s", err)
 	}
 
-	params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768"
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
 	if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil {
 		return fmt.Errorf("Can't add target %s", err)
 	}
@@ -417,21 +417,21 @@
 func getStatus(name string) (uint64, uint64, string, string, error) {
 	task, err := createTask(DeviceStatus, name)
 	if task == nil {
-		utils.Debugf("getStatus: Error createTask: %s", err)
+		log.Debugf("getStatus: Error createTask: %s", err)
 		return 0, 0, "", "", err
 	}
 	if err := task.Run(); err != nil {
-		utils.Debugf("getStatus: Error Run: %s", err)
+		log.Debugf("getStatus: Error Run: %s", err)
 		return 0, 0, "", "", err
 	}
 
 	devinfo, err := task.GetInfo()
 	if err != nil {
-		utils.Debugf("getStatus: Error GetInfo: %s", err)
+		log.Debugf("getStatus: Error GetInfo: %s", err)
 		return 0, 0, "", "", err
 	}
 	if devinfo.Exists == 0 {
-		utils.Debugf("getStatus: Non existing device %s", name)
+		log.Debugf("getStatus: Non existing device %s", name)
 		return 0, 0, "", "", fmt.Errorf("Non existing device %s", name)
 	}
 
@@ -491,7 +491,7 @@
 }
 
 func createDevice(poolName string, deviceId *int) error {
-	utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
+	log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
 
 	for {
 		task, err := createTask(DeviceTargetMsg, poolName)
@@ -542,8 +542,8 @@
 }
 
 func removeDevice(name string) error {
-	utils.Debugf("[devmapper] removeDevice START")
-	defer utils.Debugf("[devmapper] removeDevice END")
+	log.Debugf("[devmapper] removeDevice START")
+	defer log.Debugf("[devmapper] removeDevice END")
 	task, err := createTask(DeviceRemove, name)
 	if task == nil {
 		return err
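As the comment on createPool says, this is the programmatic form of `dmsetup create`; with the default blocksize the generated thin-pool table is equivalent to (device names and sizes hypothetical):

    dmsetup create docker-8:1-1234567-pool --table \
        '0 209715200 thin-pool <metadata-dev> <data-dev> 128 32768 1 skip_block_zeroing'

i.e. metadata device, then data device, a 128-sector (64K) block size, a 32768-sector low-water mark, and one feature argument, skip_block_zeroing.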
diff --git a/daemon/graphdriver/devmapper/devmapper_log.go b/daemon/graphdriver/devmapper/devmapper_log.go
index cdeaed2..ec7809c 100644
--- a/daemon/graphdriver/devmapper/devmapper_log.go
+++ b/daemon/graphdriver/devmapper/devmapper_log.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go
index 7c97d6b..1672619 100644
--- a/daemon/graphdriver/devmapper/devmapper_test.go
+++ b/daemon/graphdriver/devmapper/devmapper_test.go
@@ -1,9 +1,9 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver/graphtest"
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
 	"testing"
 )
 
diff --git a/daemon/graphdriver/devmapper/devmapper_wrapper.go b/daemon/graphdriver/devmapper/devmapper_wrapper.go
index 9f1b5a6..bd1c6fd 100644
--- a/daemon/graphdriver/devmapper/devmapper_wrapper.go
+++ b/daemon/graphdriver/devmapper/devmapper_wrapper.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
index cf82ad6..4c13eb0 100644
--- a/daemon/graphdriver/devmapper/driver.go
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
@@ -8,9 +8,9 @@
 	"os"
 	"path"
 
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/pkg/mount"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/mount"
 )
 
 func init() {
@@ -54,6 +54,7 @@
 
 	status := [][2]string{
 		{"Pool Name", s.PoolName},
+		{"Pool Blocksize", fmt.Sprintf("%d Kb", s.SectorSize/1024)},
 		{"Data file", s.DataLoopback},
 		{"Metadata file", s.MetadataLoopback},
 		{"Data Space Used", fmt.Sprintf("%.1f Mb", float64(s.Data.Used)/(1024*1024))},
@@ -137,7 +138,7 @@
 
 func (d *Driver) Put(id string) {
 	if err := d.DeviceSet.UnmountDevice(id); err != nil {
-		utils.Errorf("Warning: error unmounting device %s: %s\n", id, err)
+		log.Errorf("Warning: error unmounting device %s: %s", id, err)
 	}
 }
 
diff --git a/daemon/graphdriver/devmapper/ioctl.go b/daemon/graphdriver/devmapper/ioctl.go
index 8f403da..29caab0 100644
--- a/daemon/graphdriver/devmapper/ioctl.go
+++ b/daemon/graphdriver/devmapper/ioctl.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
diff --git a/daemon/graphdriver/devmapper/mount.go b/daemon/graphdriver/devmapper/mount.go
index c9ff216..f64e995 100644
--- a/daemon/graphdriver/devmapper/mount.go
+++ b/daemon/graphdriver/devmapper/mount.go
@@ -1,4 +1,4 @@
-// +build linux,amd64
+// +build linux
 
 package devmapper
 
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
index 4536489..90ed1d8 100644
--- a/daemon/graphdriver/driver.go
+++ b/daemon/graphdriver/driver.go
@@ -6,8 +6,8 @@
 	"os"
 	"path"
 
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/pkg/mount"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/pkg/mount"
 )
 
 type FsMagic uint64
diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go
index a667f2a..6407e12 100644
--- a/daemon/graphdriver/graphtest/graphtest.go
+++ b/daemon/graphdriver/graphtest/graphtest.go
@@ -7,7 +7,7 @@
 	"syscall"
 	"testing"
 
-	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver"
 )
 
 var (
diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go
index 7eaa224..2ea6325 100644
--- a/daemon/graphdriver/vfs/driver.go
+++ b/daemon/graphdriver/vfs/driver.go
@@ -3,7 +3,7 @@
 import (
 	"bytes"
 	"fmt"
-	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver"
 	"os"
 	"os/exec"
 	"path"
diff --git a/daemon/graphdriver/vfs/vfs_test.go b/daemon/graphdriver/vfs/vfs_test.go
index e79f93c..eaf70f5 100644
--- a/daemon/graphdriver/vfs/vfs_test.go
+++ b/daemon/graphdriver/vfs/vfs_test.go
@@ -1,7 +1,7 @@
 package vfs
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver/graphtest"
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
 	"testing"
 )
 
diff --git a/daemon/image_delete.go b/daemon/image_delete.go
new file mode 100644
index 0000000..77e8f85
--- /dev/null
+++ b/daemon/image_delete.go
@@ -0,0 +1,156 @@
+package daemon
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/utils"
+)
+
+func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 {
+		return job.Errorf("Usage: %s IMAGE", job.Name)
+	}
+	imgs := engine.NewTable("", 0)
+	if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
+		return job.Error(err)
+	}
+	if len(imgs.Data) == 0 {
+		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
+	}
+	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
+
+// FIXME: make this private and use the job instead
+func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine.Table, first, force, noprune bool) error {
+	var (
+		repoName, tag string
+		tags          = []string{}
+		tagDeleted    bool
+	)
+
+	// FIXME: please respect DRY and centralize repo+tag parsing in a single central place! -- shykes
+	repoName, tag = parsers.ParseRepositoryTag(name)
+	if tag == "" {
+		tag = graph.DEFAULTTAG
+	}
+
+	img, err := daemon.Repositories().LookupImage(name)
+	if err != nil {
+		if r, _ := daemon.Repositories().Get(repoName); r != nil {
+			return fmt.Errorf("No such image: %s:%s", repoName, tag)
+		}
+		return fmt.Errorf("No such image: %s", name)
+	}
+
+	if strings.Contains(img.ID, name) {
+		repoName = ""
+		tag = ""
+	}
+
+	byParents, err := daemon.Graph().ByParent()
+	if err != nil {
+		return err
+	}
+
+	// If deleting by id, see if the id belongs to only one repository
+	if repoName == "" {
+		for _, repoAndTag := range daemon.Repositories().ByID()[img.ID] {
+			parsedRepo, parsedTag := parsers.ParseRepositoryTag(repoAndTag)
+			if repoName == "" || repoName == parsedRepo {
+				repoName = parsedRepo
+				if parsedTag != "" {
+					tags = append(tags, parsedTag)
+				}
+			} else if repoName != parsedRepo && !force {
+				// the id belongs to multiple repos, like base:latest and user:test,
+				// in that case return conflict
+				return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
+			}
+		}
+	} else {
+		tags = append(tags, tag)
+	}
+
+	if !first && len(tags) > 0 {
+		return nil
+	}
+
+	// Untag the current image
+	for _, tag := range tags {
+		tagDeleted, err = daemon.Repositories().Delete(repoName, tag)
+		if err != nil {
+			return err
+		}
+		if tagDeleted {
+			out := &engine.Env{}
+			out.Set("Untagged", repoName+":"+tag)
+			imgs.Add(out)
+			eng.Job("log", "untag", img.ID, "").Run()
+		}
+	}
+	tags = daemon.Repositories().ByID()[img.ID]
+	if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
+		if len(byParents[img.ID]) == 0 {
+			if err := daemon.canDeleteImage(img.ID, force, tagDeleted); err != nil {
+				return err
+			}
+			if err := daemon.Repositories().DeleteAll(img.ID); err != nil {
+				return err
+			}
+			if err := daemon.Graph().Delete(img.ID); err != nil {
+				return err
+			}
+			out := &engine.Env{}
+			out.Set("Deleted", img.ID)
+			imgs.Add(out)
+			eng.Job("log", "delete", img.ID, "").Run()
+			if img.Parent != "" && !noprune {
+				err := daemon.DeleteImage(eng, img.Parent, imgs, false, force, noprune)
+				if first {
+					return err
+				}
+
+			}
+
+		}
+	}
+	return nil
+}
+
+func (daemon *Daemon) canDeleteImage(imgID string, force, untagged bool) error {
+	var message string
+	if untagged {
+		message = " (docker untagged the image)"
+	}
+	for _, container := range daemon.List() {
+		parent, err := daemon.Repositories().LookupImage(container.Image)
+		if err != nil {
+			return err
+		}
+
+		if err := parent.WalkHistory(func(p *image.Image) error {
+			if imgID == p.ID {
+				if container.State.IsRunning() {
+					if force {
+						return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it%s, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
+					}
+					return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it%s, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
+				} else if !force {
+					return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it%s, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
+				}
+			}
+			return nil
+		}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
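
As a reference point for the lookup above, a minimal sketch of how an image name is split into repository and tag (assuming graph.DEFAULTTAG is "latest"; values illustrative):

    repoName, tag := parsers.ParseRepositoryTag("ubuntu:14.04") // "ubuntu", "14.04"
    repoName, tag = parsers.ParseRepositoryTag("ubuntu")        // "ubuntu", ""
    if tag == "" {
    	tag = graph.DEFAULTTAG // falls back to "latest"
    }
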
diff --git a/daemon/info.go b/daemon/info.go
new file mode 100644
index 0000000..3d3c9ba
--- /dev/null
+++ b/daemon/info.go
@@ -0,0 +1,74 @@
+package daemon
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/parsers/operatingsystem"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
+	images, _ := daemon.Graph().Map()
+	imgcount := len(images) // len() of a nil map is 0
+	kernelVersion := "<unknown>"
+	if kv, err := kernel.GetKernelVersion(); err == nil {
+		kernelVersion = kv.String()
+	}
+
+	operatingSystem := "<unknown>"
+	if s, err := operatingsystem.GetOperatingSystem(); err == nil {
+		operatingSystem = s
+	}
+	if inContainer, err := operatingsystem.IsContainerized(); err != nil {
+		log.Errorf("Could not determine if daemon is containerized: %v", err)
+		operatingSystem += " (error determining if containerized)"
+	} else if inContainer {
+		operatingSystem += " (containerized)"
+	}
+
+	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
+	initPath := utils.DockerInitPath("")
+	if initPath == "" {
+		// if that fails, we'll just return the path from the daemon
+		initPath = daemon.SystemInitPath()
+	}
+
+	cjob := job.Eng.Job("subscribers_count")
+	env, _ := cjob.Stdout.AddEnv()
+	if err := cjob.Run(); err != nil {
+		return job.Error(err)
+	}
+	v := &engine.Env{}
+	v.SetInt("Containers", len(daemon.List()))
+	v.SetInt("Images", imgcount)
+	v.Set("Driver", daemon.GraphDriver().String())
+	v.SetJson("DriverStatus", daemon.GraphDriver().Status())
+	v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit)
+	v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit)
+	v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled)
+	v.SetBool("Debug", os.Getenv("DEBUG") != "")
+	v.SetInt("NFd", utils.GetTotalUsedFds())
+	v.SetInt("NGoroutines", runtime.NumGoroutine())
+	v.Set("ExecutionDriver", daemon.ExecutionDriver().Name())
+	v.SetInt("NEventsListener", env.GetInt("count"))
+	v.Set("KernelVersion", kernelVersion)
+	v.Set("OperatingSystem", operatingSystem)
+	v.Set("IndexServerAddress", registry.IndexServerAddress())
+	v.Set("InitSha1", dockerversion.INITSHA1)
+	v.Set("InitPath", initPath)
+	if _, err := v.WriteTo(job.Stdout); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
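
For reference, the env written above serializes to the flat key/value payload consumed by `docker info`; an abbreviated, purely illustrative example:

    Containers=2
    Images=47
    Driver=devicemapper
    DriverStatus=[["Pool Name","docker-pool"],["Data Space Used","1.7 Mb"]]
    ExecutionDriver=native-0.2
    KernelVersion=3.13.0-33-generic
    OperatingSystem=Ubuntu 14.04 LTS (containerized)
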
diff --git a/daemon/inspect.go b/daemon/inspect.go
index b93aec5..373b43b 100644
--- a/daemon/inspect.go
+++ b/daemon/inspect.go
@@ -4,8 +4,8 @@
 	"encoding/json"
 	"fmt"
 
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/runconfig"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/runconfig"
 )
 
 func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
diff --git a/daemon/kill.go b/daemon/kill.go
new file mode 100644
index 0000000..f5f5897
--- /dev/null
+++ b/daemon/kill.go
@@ -0,0 +1,59 @@
+package daemon
+
+import (
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/signal"
+)
+
+// ContainerKill sends a signal to the container.
+// If no signal is given (sig 0), then Kill with SIGKILL and wait
+// for the container to exit.
+// If a signal is given, then just send it to the container and return.
+func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
+	if n := len(job.Args); n < 1 || n > 2 {
+		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
+	}
+	var (
+		name = job.Args[0]
+		sig  uint64
+		err  error
+	)
+
+	// If we have a signal, look at it. Otherwise, do nothing
+	if len(job.Args) == 2 && job.Args[1] != "" {
+		// Check if we passed the signal as a number:
+		// The largest legal signal is 31, so let's parse on 5 bits
+		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
+		if err != nil {
+			// The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
+			sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
+		}
+
+		if sig == 0 {
+			return job.Errorf("Invalid signal: %s", job.Args[1])
+		}
+	}
+
+	if container := daemon.Get(name); container != nil {
+		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
+		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
+			if err := container.Kill(); err != nil {
+				return job.Errorf("Cannot kill container %s: %s", name, err)
+			}
+			container.LogEvent("kill")
+		} else {
+			// Otherwise, just send the requested signal
+			if err := container.KillSig(int(sig)); err != nil {
+				return job.Errorf("Cannot kill container %s: %s", name, err)
+			}
+			// FIXME: Add event for signals
+		}
+	} else {
+		return job.Errorf("No such container: %s", name)
+	}
+	return engine.StatusOK
+}
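
A minimal sketch of the two-step signal parsing used above, pulled out into a hypothetical helper (not part of this patch); it tries a numeric value first, then falls back to a name with an optional SIG prefix:

    func parseSignal(raw string) (uint64, error) {
    	// the numeric path: a bit size of 5 caps the accepted value at 31
    	if sig, err := strconv.ParseUint(raw, 10, 5); err == nil && sig > 0 {
    		return sig, nil
    	}
    	// the name path: accepts both "KILL" and "SIGKILL"
    	if sig := uint64(signal.SignalMap[strings.TrimPrefix(raw, "SIG")]); sig > 0 {
    		return sig, nil
    	}
    	return 0, fmt.Errorf("Invalid signal: %s", raw)
    }
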
diff --git a/daemon/list.go b/daemon/list.go
new file mode 100644
index 0000000..2da5254
--- /dev/null
+++ b/daemon/list.go
@@ -0,0 +1,151 @@
+package daemon
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/pkg/graphdb"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/parsers/filters"
+)
+
+// List returns an array of all containers registered in the daemon.
+func (daemon *Daemon) List() []*Container {
+	return daemon.containers.List()
+}
+
+func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
+	var (
+		foundBefore bool
+		displayed   int
+		all         = job.GetenvBool("all")
+		since       = job.Getenv("since")
+		before      = job.Getenv("before")
+		n           = job.GetenvInt("limit")
+		size        = job.GetenvBool("size")
+		psFilters   filters.Args
+		filt_exited []int
+	)
+	outs := engine.NewTable("Created", 0)
+
+	psFilters, err := filters.FromParam(job.Getenv("filters"))
+	if err != nil {
+		return job.Error(err)
+	}
+	if i, ok := psFilters["exited"]; ok {
+		for _, value := range i {
+			code, err := strconv.Atoi(value)
+			if err != nil {
+				return job.Error(err)
+			}
+			filt_exited = append(filt_exited, code)
+		}
+	}
+
+	names := map[string][]string{}
+	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
+		names[e.ID()] = append(names[e.ID()], p)
+		return nil
+	}, -1)
+
+	var beforeCont, sinceCont *Container
+	if before != "" {
+		beforeCont = daemon.Get(before)
+		if beforeCont == nil {
+			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
+		}
+	}
+
+	if since != "" {
+		sinceCont = daemon.Get(since)
+		if sinceCont == nil {
+			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
+		}
+	}
+
+	errLast := errors.New("last container")
+	writeCont := func(container *Container) error {
+		container.Lock()
+		defer container.Unlock()
+		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
+			return nil
+		}
+		if before != "" && !foundBefore {
+			if container.ID == beforeCont.ID {
+				foundBefore = true
+			}
+			return nil
+		}
+		if n > 0 && displayed == n {
+			return errLast
+		}
+		if since != "" {
+			if container.ID == sinceCont.ID {
+				return errLast
+			}
+		}
+		if len(filt_exited) > 0 && !container.State.IsRunning() {
+			should_skip := true
+			for _, code := range filt_exited {
+				if code == container.State.GetExitCode() {
+					should_skip = false
+					break
+				}
+			}
+			if should_skip {
+				return nil
+			}
+		}
+		displayed++
+		out := &engine.Env{}
+		out.Set("Id", container.ID)
+		out.SetList("Names", names[container.ID])
+		out.Set("Image", daemon.Repositories().ImageName(container.Image))
+		if len(container.Args) > 0 {
+			args := []string{}
+			for _, arg := range container.Args {
+				if strings.Contains(arg, " ") {
+					args = append(args, fmt.Sprintf("'%s'", arg))
+				} else {
+					args = append(args, arg)
+				}
+			}
+			argsAsString := strings.Join(args, " ")
+
+			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
+		} else {
+			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
+		}
+		out.SetInt64("Created", container.Created.Unix())
+		out.Set("Status", container.State.String())
+		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
+		if err != nil {
+			return err
+		}
+		out.Set("Ports", str)
+		if size {
+			sizeRw, sizeRootFs := container.GetSize()
+			out.SetInt64("SizeRw", sizeRw)
+			out.SetInt64("SizeRootFs", sizeRootFs)
+		}
+		outs.Add(out)
+		return nil
+	}
+
+	for _, container := range daemon.List() {
+		if err := writeCont(container); err != nil {
+			if err != errLast {
+				return job.Error(err)
+			}
+			break
+		}
+	}
+	outs.ReverseSort()
+	if _, err := outs.WriteListTo(job.Stdout); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
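
The filters argument above arrives as a JSON-encoded map of filter name to accepted values; a small sketch of the `--filter exited=...` path (wire format assumed from filters.FromParam):

    psFilters, _ := filters.FromParam(`{"exited":["0","137"]}`)
    // psFilters["exited"] == []string{"0", "137"}; each value is
    // Atoi'd into filt_exited and matched against the container's exit code
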
diff --git a/daemon/logs.go b/daemon/logs.go
new file mode 100644
index 0000000..386d9c6
--- /dev/null
+++ b/daemon/logs.go
@@ -0,0 +1,134 @@
+package daemon
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/tailfile"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/jsonlog"
+)
+
+func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+	}
+
+	var (
+		name   = job.Args[0]
+		stdout = job.GetenvBool("stdout")
+		stderr = job.GetenvBool("stderr")
+		tail   = job.Getenv("tail")
+		follow = job.GetenvBool("follow")
+		times  = job.GetenvBool("timestamps")
+		lines  = -1
+		format string
+	)
+	if !(stdout || stderr) {
+		return job.Errorf("You must choose at least one stream")
+	}
+	if times {
+		format = time.RFC3339Nano
+	}
+	if tail == "" {
+		tail = "all"
+	}
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+	cLog, err := container.ReadLog("json")
+	if err != nil && os.IsNotExist(err) {
+		// Legacy logs
+		log.Debugf("Old logs format")
+		if stdout {
+			cLog, err := container.ReadLog("stdout")
+			if err != nil {
+				log.Errorf("Error reading logs (stdout): %s", err)
+			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
+				log.Errorf("Error streaming logs (stdout): %s", err)
+			}
+		}
+		if stderr {
+			cLog, err := container.ReadLog("stderr")
+			if err != nil {
+				log.Errorf("Error reading logs (stderr): %s", err)
+			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
+				log.Errorf("Error streaming logs (stderr): %s", err)
+			}
+		}
+	} else if err != nil {
+		log.Errorf("Error reading logs (json): %s", err)
+	} else {
+		if tail != "all" {
+			var err error
+			lines, err = strconv.Atoi(tail)
+			if err != nil {
+				log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
+				lines = -1
+			}
+		}
+		if lines != 0 {
+			if lines > 0 {
+				f := cLog.(*os.File)
+				ls, err := tailfile.TailFile(f, lines)
+				if err != nil {
+					return job.Error(err)
+				}
+				tmp := bytes.NewBuffer([]byte{})
+				for _, l := range ls {
+					fmt.Fprintf(tmp, "%s\n", l)
+				}
+				cLog = tmp
+			}
+			dec := json.NewDecoder(cLog)
+			for {
+				l := &jsonlog.JSONLog{}
+
+				if err := dec.Decode(l); err == io.EOF {
+					break
+				} else if err != nil {
+					log.Errorf("Error streaming logs: %s", err)
+					break
+				}
+				logLine := l.Log
+				if times {
+					logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine)
+				}
+				if l.Stream == "stdout" && stdout {
+					fmt.Fprintf(job.Stdout, "%s", logLine)
+				}
+				if l.Stream == "stderr" && stderr {
+					fmt.Fprintf(job.Stderr, "%s", logLine)
+				}
+			}
+		}
+	}
+	if follow {
+		errors := make(chan error, 2)
+		if stdout {
+			stdoutPipe := container.StdoutLogPipe()
+			go func() {
+				errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format)
+			}()
+		}
+		if stderr {
+			stderrPipe := container.StderrLogPipe()
+			go func() {
+				errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format)
+			}()
+		}
+		err := <-errors
+		if err != nil {
+			log.Errorf("%s", err)
+		}
+	}
+	return engine.StatusOK
+}
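
Each line of the "json" log file is one self-contained record; a minimal sketch of the decode loop above against two sample lines (the log/stream/time field tags are assumed from pkg/jsonlog):

    buf := bytes.NewBufferString(
    	`{"log":"hello\n","stream":"stdout","time":"2014-08-01T12:00:00Z"}` + "\n" +
    		`{"log":"oops\n","stream":"stderr","time":"2014-08-01T12:00:01Z"}` + "\n")
    dec := json.NewDecoder(buf)
    for {
    	l := &jsonlog.JSONLog{}
    	if err := dec.Decode(l); err != nil { // io.EOF ends the stream
    		break
    	}
    	fmt.Printf("%s: %s", l.Stream, l.Log) // "stdout: hello", "stderr: oops"
    }
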
diff --git a/daemon/monitor.go b/daemon/monitor.go
new file mode 100644
index 0000000..1a92965
--- /dev/null
+++ b/daemon/monitor.go
@@ -0,0 +1,315 @@
+package daemon
+
+import (
+	"io"
+	"os/exec"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/runconfig"
+)
+
+const defaultTimeIncrement = 100
+
+// containerMonitor monitors the execution of a container's main process.
+// If a restart policy is specified for the container, the monitor will ensure that the
+// process is restarted based on the rules of the policy.  When the container is finally stopped,
+// the monitor will reset and clean up any of the container's resources, such as networking
+// allocations and the rootfs.
+type containerMonitor struct {
+	mux sync.Mutex
+
+	// container is the container being monitored
+	container *Container
+
+	// restartPolicy is the current policy being applied to the container monitor
+	restartPolicy runconfig.RestartPolicy
+
+	// failureCount is the number of times the container has failed to
+	// start in a row
+	failureCount int
+
+	// shouldStop signals the monitor that the next time the container exits it is
+	// either because docker or the user asked for the container to be stopped
+	shouldStop bool
+
+	// startSignal is a channel that is closed after the container initially starts
+	startSignal chan struct{}
+
+	// stopChan is used to signal the monitor whenever it is waiting for the
+	// next restart, so that the timeIncrement is not honored and the user is not
+	// left waiting for nothing to happen during this time
+	stopChan chan struct{}
+
+	// timeIncrement is the amount of time to wait between restarts
+	// this is in milliseconds
+	timeIncrement int
+
+	// lastStartTime is the time at which the monitor last exec'd the container's process
+	lastStartTime time.Time
+}
+
+// newContainerMonitor returns an initialized containerMonitor for the provided container
+// honoring the provided restart policy
+func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor {
+	return &containerMonitor{
+		container:     container,
+		restartPolicy: policy,
+		timeIncrement: defaultTimeIncrement,
+		stopChan:      make(chan struct{}),
+		startSignal:   make(chan struct{}),
+	}
+}
+
+// ExitOnNext signals to the container monitor that it should stop monitoring the container
+// for exits the next time the process dies
+func (m *containerMonitor) ExitOnNext() {
+	m.mux.Lock()
+
+	// we need to protect having a double close of the channel when stop is called
+	// twice or else we will get a panic
+	if !m.shouldStop {
+		m.shouldStop = true
+		close(m.stopChan)
+	}
+
+	m.mux.Unlock()
+}
+
+// Close closes the container's resources such as networking allocations and
+// unmounts the container's root filesystem
+func (m *containerMonitor) Close() error {
+	// Cleanup networking and mounts
+	m.container.cleanup()
+
+	// FIXME: here is race condition between two RUN instructions in Dockerfile
+	// because they share same runconfig and change image. Must be fixed
+	// in builder/builder.go
+	if err := m.container.toDisk(); err != nil {
+		log.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)
+
+		return err
+	}
+
+	return nil
+}
+
+// Start starts the containers process and monitors it according to the restart policy
+func (m *containerMonitor) Start() error {
+	var (
+		err        error
+		exitStatus int
+	)
+
+	// this variable indicates whether we are under container.Lock
+	underLock := true
+
+	// ensure that when the monitor finally exits we release the networking and unmount the rootfs
+	defer func() {
+		if !underLock {
+			m.container.Lock()
+			defer m.container.Unlock()
+		}
+		m.Close()
+	}()
+
+	// reset the restart count
+	m.container.RestartCount = -1
+
+	for {
+		m.container.RestartCount++
+
+		if err := m.container.startLoggingToDisk(); err != nil {
+			m.resetContainer()
+
+			return err
+		}
+
+		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
+
+		m.container.LogEvent("start")
+
+		m.lastStartTime = time.Now()
+
+		if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil {
+			// if we receive an internal error from the initial start of a container then lets
+			// return it instead of entering the restart loop
+			if m.container.RestartCount == 0 {
+				m.resetContainer()
+
+				return err
+			}
+
+			log.Errorf("Error running container: %s", err)
+		}
+
+		// at this point container.Lock has already been released
+		underLock = false
+
+		m.resetMonitor(err == nil && exitStatus == 0)
+
+		if m.shouldRestart(exitStatus) {
+			m.container.State.SetRestarting(exitStatus)
+
+			m.container.LogEvent("die")
+
+			m.resetContainer()
+
+			// sleep with a small time increment between each restart to help avoid issues caused by quickly
+			// restarting the container after some types of errors (networking cut out, etc...)
+			m.waitForNextRestart()
+
+			// we need to check this before reentering the loop because the waitForNextRestart could have
+			// been terminated by a request from a user
+			if m.shouldStop {
+				m.container.State.SetStopped(exitStatus)
+
+				return err
+			}
+
+			continue
+		}
+
+		m.container.State.SetStopped(exitStatus)
+
+		m.container.LogEvent("die")
+
+		m.resetContainer()
+
+		break
+	}
+
+	return err
+}
+
+// resetMonitor resets the stateful fields on the containerMonitor based on the
+// previous run's success or failure.  Regardless of success, if the container had
+// an execution time of more than 10s, the timer is reset back to the default
+func (m *containerMonitor) resetMonitor(successful bool) {
+	executionTime := time.Now().Sub(m.lastStartTime).Seconds()
+
+	if executionTime > 10 {
+		m.timeIncrement = defaultTimeIncrement
+	} else {
+		// otherwise we need to increment the amount of time we wait before restarting
+		// the process.  We will build up by multiplying the increment by 2
+		m.timeIncrement *= 2
+	}
+
+	// the container exited successfully so we need to reset the failure counter
+	if successful {
+		m.failureCount = 0
+	} else {
+		m.failureCount++
+	}
+}
+
+// waitForNextRestart waits with the default time increment to restart the container unless
+// a user or docker asks for the container to be stopped
+func (m *containerMonitor) waitForNextRestart() {
+	select {
+	case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond):
+	case <-m.stopChan:
+	}
+}
+
+// shouldRestart checks the restart policy and applies the rules to determine if
+// the container's process should be restarted
+func (m *containerMonitor) shouldRestart(exitStatus int) bool {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	// do not restart if the user or docker has requested that this container be stopped
+	if m.shouldStop {
+		return false
+	}
+
+	switch m.restartPolicy.Name {
+	case "always":
+		return true
+	case "on-failure":
+		// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
+		if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount >= max {
+			log.Debugf("stopping restart of container %s because maximum failure could of %d has been reached", max)
+			return false
+		}
+
+		return exitStatus != 0
+	}
+
+	return false
+}
+
+// callback ensures that the container's state is properly updated after we
+// receive an ack from the execution drivers
+func (m *containerMonitor) callback(command *execdriver.Command) {
+	if command.Tty {
+		// The callback is called after the process Start()
+		// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
+		// which we close here.
+		if c, ok := command.Stdout.(io.Closer); ok {
+			c.Close()
+		}
+	}
+
+	m.container.State.SetRunning(command.Pid())
+
+	// signal that the process has started
+	// close channel only if not closed
+	select {
+	case <-m.startSignal:
+	default:
+		close(m.startSignal)
+	}
+
+	if err := m.container.ToDisk(); err != nil {
+		log.Debugf("%s", err)
+	}
+}
+
+// resetContainer resets the container's IO and ensures that the command is able to be executed again
+// by copying the data into a new struct
+func (m *containerMonitor) resetContainer() {
+	container := m.container
+
+	if container.Config.OpenStdin {
+		if err := container.stdin.Close(); err != nil {
+			log.Errorf("%s: Error close stdin: %s", container.ID, err)
+		}
+	}
+
+	if err := container.stdout.Clean(); err != nil {
+		log.Errorf("%s: Error close stdout: %s", container.ID, err)
+	}
+
+	if err := container.stderr.Clean(); err != nil {
+		log.Errorf("%s: Error close stderr: %s", container.ID, err)
+	}
+
+	if container.command != nil && container.command.Terminal != nil {
+		if err := container.command.Terminal.Close(); err != nil {
+			log.Errorf("%s: Error closing terminal: %s", container.ID, err)
+		}
+	}
+
+	// Re-create a brand new stdin pipe once the container exited
+	if container.Config.OpenStdin {
+		container.stdin, container.stdinPipe = io.Pipe()
+	}
+
+	c := container.command.Cmd
+
+	container.command.Cmd = exec.Cmd{
+		Stdin:       c.Stdin,
+		Stdout:      c.Stdout,
+		Stderr:      c.Stderr,
+		Path:        c.Path,
+		Env:         c.Env,
+		ExtraFiles:  c.ExtraFiles,
+		Args:        c.Args,
+		Dir:         c.Dir,
+		SysProcAttr: c.SysProcAttr,
+	}
+}
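
Concretely: with the default increment of 100ms, a container that keeps dying within 10 seconds waits 100, 200, 400, ... ms between restarts, and a single run longer than 10 seconds snaps the wait back to the default. A tiny sketch of that arithmetic:

    wait := 100 // defaultTimeIncrement, in milliseconds
    for i := 0; i < 4; i++ {
    	fmt.Println(wait) // 100, 200, 400, 800
    	wait *= 2         // each short-lived run doubles the backoff
    }
    wait = 100 // a run that survives more than 10s resets it
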
diff --git a/daemon/network_settings.go b/daemon/network_settings.go
index a5c750a..bf28ca1 100644
--- a/daemon/network_settings.go
+++ b/daemon/network_settings.go
@@ -1,8 +1,8 @@
 package daemon
 
 import (
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/nat"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
 )
 
 // FIXME: move deprecated port stuff to nat to clean up the core.
diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go
index a843da0..06cf37e 100644
--- a/daemon/networkdriver/bridge/driver.go
+++ b/daemon/networkdriver/bridge/driver.go
@@ -3,20 +3,20 @@
 import (
 	"fmt"
 	"io/ioutil"
-	"log"
 	"net"
 	"strings"
 	"sync"
 
+	"github.com/docker/docker/daemon/networkdriver"
+	"github.com/docker/docker/daemon/networkdriver/ipallocator"
+	"github.com/docker/docker/daemon/networkdriver/portallocator"
+	"github.com/docker/docker/daemon/networkdriver/portmapper"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/iptables"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/libcontainer/netlink"
-	"github.com/dotcloud/docker/daemon/networkdriver"
-	"github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
-	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
-	"github.com/dotcloud/docker/daemon/networkdriver/portmapper"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/iptables"
-	"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
-	"github.com/dotcloud/docker/utils"
 )
 
 const (
@@ -158,7 +158,7 @@
 
 	bridgeNetwork = network
 
-	// https://github.com/dotcloud/docker/issues/2768
+	// https://github.com/docker/docker/issues/2768
 	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP)
 
 	for name, f := range map[string]engine.Handler{
@@ -176,7 +176,7 @@
 
 func setupIPTables(addr net.Addr, icc bool) error {
 	// Enable NAT
-	natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"}
+	natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-o", bridgeIface, "-j", "MASQUERADE"}
 
 	if !iptables.Exists(natArgs...) {
 		if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil {
@@ -196,7 +196,7 @@
 		iptables.Raw(append([]string{"-D"}, acceptArgs...)...)
 
 		if !iptables.Exists(dropArgs...) {
-			utils.Debugf("Disable inter-container communication")
+			log.Debugf("Disable inter-container communication")
 			if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil {
 				return fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
 			} else if len(output) != 0 {
@@ -207,7 +207,7 @@
 		iptables.Raw(append([]string{"-D"}, dropArgs...)...)
 
 		if !iptables.Exists(acceptArgs...) {
-			utils.Debugf("Enable inter-container communication")
+			log.Debugf("Enable inter-container communication")
 			if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil {
 				return fmt.Errorf("Unable to allow intercontainer communication: %s", err)
 			} else if len(output) != 0 {
@@ -271,7 +271,7 @@
 					ifaceAddr = addr
 					break
 				} else {
-					utils.Debugf("%s %s", addr, err)
+					log.Debugf("%s %s", addr, err)
 				}
 			}
 		}
@@ -280,7 +280,7 @@
 	if ifaceAddr == "" {
 		return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface)
 	}
-	utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
+	log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
 
 	if err := createBridgeIface(bridgeIface); err != nil {
 		return err
@@ -306,11 +306,11 @@
 }
 
 func createBridgeIface(name string) error {
-	kv, err := utils.GetKernelVersion()
+	kv, err := kernel.GetKernelVersion()
 	// only set the bridge's mac address if the kernel version is > 3.3
 	// before that it was not supported
 	setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3)
-	utils.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
+	log.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
 	return netlink.CreateBridge(name, setBridgeMacAddr)
 }
 
@@ -363,12 +363,12 @@
 
 	for _, nat := range containerInterface.PortMappings {
 		if err := portmapper.Unmap(nat); err != nil {
-			log.Printf("Unable to unmap port %s: %s", nat, err)
+			log.Infof("Unable to unmap port %s: %s", nat, err)
 		}
 	}
 
 	if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
-		log.Printf("Unable to release ip %s\n", err)
+		log.Infof("Unable to release ip %s", err)
 	}
 	return engine.StatusOK
 }
@@ -415,8 +415,7 @@
 			break
 		}
 
-		switch allocerr := err.(type) {
-		case portallocator.ErrPortAlreadyAllocated:
+		if allocerr, ok := err.(portallocator.ErrPortAlreadyAllocated); ok {
 			// There is no point in immediately retrying to map an explicitly
 			// chosen port.
 			if hostPort != 0 {
@@ -426,7 +425,7 @@
 
 			// Automatically chosen 'free' port failed to bind: move on the next.
 			job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String())
-		default:
+		} else {
 			// some other error during mapping
 			job.Logf("Received an unexpected error during port allocation: %s", err.Error())
 			break
diff --git a/daemon/networkdriver/bridge/driver_test.go b/daemon/networkdriver/bridge/driver_test.go
index f8ddd4c..9bc6c32 100644
--- a/daemon/networkdriver/bridge/driver_test.go
+++ b/daemon/networkdriver/bridge/driver_test.go
@@ -1,14 +1,19 @@
 package bridge
 
 import (
-	"fmt"
 	"net"
 	"strconv"
 	"testing"
 
-	"github.com/dotcloud/docker/engine"
+	"github.com/docker/docker/daemon/networkdriver/portmapper"
+	"github.com/docker/docker/engine"
 )
 
+func init() {
+	// reset the new proxy command for mocking out the userland proxy in tests
+	portmapper.NewProxy = portmapper.NewMockProxyCommand
+}
+
 func findFreePort(t *testing.T) int {
 	l, err := net.Listen("tcp", ":0")
 	if err != nil {
@@ -61,46 +66,3 @@
 		t.Fatal("Duplicate port allocation granted by AllocatePort")
 	}
 }
-
-func TestAllocatePortReclaim(t *testing.T) {
-	eng := engine.New()
-	eng.Logging = false
-
-	freePort := findFreePort(t)
-
-	// Init driver
-	job := eng.Job("initdriver")
-	if res := InitDriver(job); res != engine.StatusOK {
-		t.Fatal("Failed to initialize network driver")
-	}
-
-	// Allocate interface
-	job = eng.Job("allocate_interface", "container_id")
-	if res := Allocate(job); res != engine.StatusOK {
-		t.Fatal("Failed to allocate network interface")
-	}
-
-	// Occupy port
-	listenAddr := fmt.Sprintf(":%d", freePort)
-	tcpListenAddr, err := net.ResolveTCPAddr("tcp", listenAddr)
-	if err != nil {
-		t.Fatalf("Failed to resolve TCP address '%s'", listenAddr)
-	}
-
-	l, err := net.ListenTCP("tcp", tcpListenAddr)
-	if err != nil {
-		t.Fatalf("Fail to listen on port %d", freePort)
-	}
-
-	// Allocate port, expect failure
-	job = newPortAllocationJob(eng, freePort)
-	if res := AllocatePort(job); res == engine.StatusOK {
-		t.Fatal("Successfully allocated currently used port")
-	}
-
-	// Reclaim port, retry allocation
-	l.Close()
-	if res := AllocatePort(job); res != engine.StatusOK {
-		t.Fatal("Failed to allocate previously reclaimed port")
-	}
-}
diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go
index f154b0b..1bf8e1d 100644
--- a/daemon/networkdriver/ipallocator/allocator.go
+++ b/daemon/networkdriver/ipallocator/allocator.go
@@ -3,7 +3,7 @@
 import (
 	"encoding/binary"
 	"errors"
-	"github.com/dotcloud/docker/daemon/networkdriver"
+	"github.com/docker/docker/daemon/networkdriver"
 	"net"
 	"sync"
 )
diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go
index c722ba9..d4fcc6e 100644
--- a/daemon/networkdriver/portallocator/portallocator.go
+++ b/daemon/networkdriver/portallocator/portallocator.go
@@ -12,10 +12,22 @@
 	last int
 }
 
-type (
-	protocolMap map[string]*portMap
-	ipMapping   map[string]protocolMap
-)
+func newPortMap() *portMap {
+	return &portMap{
+		p: map[int]struct{}{},
+	}
+}
+
+type protoMap map[string]*portMap
+
+func newProtoMap() protoMap {
+	return protoMap{
+		"tcp": newPortMap(),
+		"udp": newPortMap(),
+	}
+}
+
+type ipMapping map[string]protoMap
 
 const (
 	BeginPortRange = 49153
@@ -62,107 +74,83 @@
 	return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
 }
 
+// RequestPort requests a new port from the global ports pool for the specified ip and proto.
+// If port is 0 it returns the first free port. Otherwise it checks port availability
+// in the pool and returns that port, or an error if the port is already busy.
 func RequestPort(ip net.IP, proto string, port int) (int, error) {
 	mutex.Lock()
 	defer mutex.Unlock()
 
-	if err := validateProto(proto); err != nil {
+	if proto != "tcp" && proto != "udp" {
+		return 0, ErrUnknownProtocol
+	}
+
+	if ip == nil {
+		ip = defaultIP
+	}
+	ipstr := ip.String()
+	protomap, ok := globalMap[ipstr]
+	if !ok {
+		protomap = newProtoMap()
+		globalMap[ipstr] = protomap
+	}
+	mapping := protomap[proto]
+	if port > 0 {
+		if _, ok := mapping.p[port]; !ok {
+			mapping.p[port] = struct{}{}
+			return port, nil
+		}
+		return 0, NewErrPortAlreadyAllocated(ipstr, port)
+	}
+
+	port, err := mapping.findPort()
+	if err != nil {
 		return 0, err
 	}
-
-	ip = getDefault(ip)
-
-	mapping := getOrCreate(ip)
-
-	if port > 0 {
-		if _, ok := mapping[proto].p[port]; !ok {
-			mapping[proto].p[port] = struct{}{}
-			return port, nil
-		} else {
-			return 0, NewErrPortAlreadyAllocated(ip.String(), port)
-		}
-	} else {
-		port, err := findPort(ip, proto)
-
-		if err != nil {
-			return 0, err
-		}
-
-		return port, nil
-	}
+	return port, nil
 }
 
+// ReleasePort releases a port from the global ports pool for the specified ip and proto.
 func ReleasePort(ip net.IP, proto string, port int) error {
 	mutex.Lock()
 	defer mutex.Unlock()
 
-	ip = getDefault(ip)
-
-	mapping := getOrCreate(ip)[proto]
-	delete(mapping.p, port)
-
+	if ip == nil {
+		ip = defaultIP
+	}
+	protomap, ok := globalMap[ip.String()]
+	if !ok {
+		return nil
+	}
+	delete(protomap[proto].p, port)
 	return nil
 }
 
+// ReleaseAll releases all ports for all ips.
 func ReleaseAll() error {
 	mutex.Lock()
-	defer mutex.Unlock()
-
 	globalMap = ipMapping{}
-
+	mutex.Unlock()
 	return nil
 }
 
-func getOrCreate(ip net.IP) protocolMap {
-	ipstr := ip.String()
-
-	if _, ok := globalMap[ipstr]; !ok {
-		globalMap[ipstr] = protocolMap{
-			"tcp": &portMap{p: map[int]struct{}{}, last: 0},
-			"udp": &portMap{p: map[int]struct{}{}, last: 0},
-		}
-	}
-
-	return globalMap[ipstr]
-}
-
-func findPort(ip net.IP, proto string) (int, error) {
-	mapping := getOrCreate(ip)[proto]
-
-	if mapping.last == 0 {
-		mapping.p[BeginPortRange] = struct{}{}
-		mapping.last = BeginPortRange
+func (pm *portMap) findPort() (int, error) {
+	if pm.last == 0 {
+		pm.p[BeginPortRange] = struct{}{}
+		pm.last = BeginPortRange
 		return BeginPortRange, nil
 	}
 
-	for port := mapping.last + 1; port != mapping.last; port++ {
+	for port := pm.last + 1; port != pm.last; port++ {
 		if port > EndPortRange {
 			port = BeginPortRange
 		}
 
-		if _, ok := mapping.p[port]; !ok {
-			mapping.p[port] = struct{}{}
-			mapping.last = port
+		if _, ok := pm.p[port]; !ok {
+			pm.p[port] = struct{}{}
+			pm.last = port
 			return port, nil
 		}
-
 	}
-
 	return 0, ErrAllPortsAllocated
 }
-
-func getDefault(ip net.IP) net.IP {
-	if ip == nil {
-		return defaultIP
-	}
-
-	return ip
-}
-
-func validateProto(proto string) error {
-	if proto != "tcp" && proto != "udp" {
-		return ErrUnknownProtocol
-	}
-
-	return nil
-}
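
A short usage sketch of the reworked allocator (values illustrative):

    // a dynamic request (port 0) hands out the first free port,
    // starting at BeginPortRange (49153)
    port, _ := portallocator.RequestPort(nil, "tcp", 0)

    // an explicit request for a port that is already taken fails
    if _, err := portallocator.RequestPort(nil, "tcp", port); err != nil {
    	// err is an ErrPortAlreadyAllocated carrying the ip and port
    }

    portallocator.ReleasePort(nil, "tcp", port)
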
diff --git a/daemon/networkdriver/portmapper/mapper.go b/daemon/networkdriver/portmapper/mapper.go
index 1bd3322..a81596d 100644
--- a/daemon/networkdriver/portmapper/mapper.go
+++ b/daemon/networkdriver/portmapper/mapper.go
@@ -6,14 +6,13 @@
 	"net"
 	"sync"
 
-	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
-	"github.com/dotcloud/docker/pkg/iptables"
-	"github.com/dotcloud/docker/pkg/proxy"
+	"github.com/docker/docker/daemon/networkdriver/portallocator"
+	"github.com/docker/docker/pkg/iptables"
 )
 
 type mapping struct {
 	proto         string
-	userlandProxy proxy.Proxy
+	userlandProxy UserlandProxy
 	host          net.Addr
 	container     net.Addr
 }
@@ -24,7 +23,8 @@
 
 	// udp:ip:port
 	currentMappings = make(map[string]*mapping)
-	newProxy        = proxy.NewProxy
+
+	NewProxy = NewProxyCommand
 )
 
 var (
@@ -37,54 +37,58 @@
 	chain = c
 }
 
-func Map(container net.Addr, hostIP net.IP, hostPort int) (net.Addr, error) {
+func Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) {
 	lock.Lock()
 	defer lock.Unlock()
 
 	var (
 		m                 *mapping
-		err               error
 		proto             string
 		allocatedHostPort int
+		proxy             UserlandProxy
 	)
 
-	// release the port on any error during return.
-	defer func() {
-		if err != nil {
-			portallocator.ReleasePort(hostIP, proto, allocatedHostPort)
-		}
-	}()
-
 	switch container.(type) {
 	case *net.TCPAddr:
 		proto = "tcp"
 		if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil {
 			return nil, err
 		}
+
 		m = &mapping{
 			proto:     proto,
 			host:      &net.TCPAddr{IP: hostIP, Port: allocatedHostPort},
 			container: container,
 		}
+
+		proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port)
 	case *net.UDPAddr:
 		proto = "udp"
 		if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil {
 			return nil, err
 		}
+
 		m = &mapping{
 			proto:     proto,
 			host:      &net.UDPAddr{IP: hostIP, Port: allocatedHostPort},
 			container: container,
 		}
+
+		proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port)
 	default:
-		err = ErrUnknownBackendAddressType
-		return nil, err
+		return nil, ErrUnknownBackendAddressType
 	}
 
+	// release the allocated port on any further error during return.
+	defer func() {
+		if err != nil {
+			portallocator.ReleasePort(hostIP, proto, allocatedHostPort)
+		}
+	}()
+
 	key := getKey(m.host)
 	if _, exists := currentMappings[key]; exists {
-		err = ErrPortMappedForIP
-		return nil, err
+		return nil, ErrPortMappedForIP
 	}
 
 	containerIP, containerPort := getIPAndPort(m.container)
@@ -92,17 +96,15 @@
 		return nil, err
 	}
 
-	p, err := newProxy(m.host, m.container)
-	if err != nil {
-		// need to undo the iptables rules before we return
-		forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
-		return nil, err
-	}
-
-	m.userlandProxy = p
+	m.userlandProxy = proxy
 	currentMappings[key] = m
 
-	go p.Run()
+	if err := proxy.Start(); err != nil {
+		// need to undo the iptables rules before we return
+		forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
+
+		return nil, err
+	}
 
 	return m.host, nil
 }
@@ -117,7 +119,8 @@
 		return ErrPortNotMapped
 	}
 
-	data.userlandProxy.Close()
+	data.userlandProxy.Stop()
+
 	delete(currentMappings, key)
 
 	containerIP, containerPort := getIPAndPort(data.container)
diff --git a/daemon/networkdriver/portmapper/mapper_test.go b/daemon/networkdriver/portmapper/mapper_test.go
index 6affdc5..42e44a1 100644
--- a/daemon/networkdriver/portmapper/mapper_test.go
+++ b/daemon/networkdriver/portmapper/mapper_test.go
@@ -1,16 +1,16 @@
 package portmapper
 
 import (
-	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
-	"github.com/dotcloud/docker/pkg/iptables"
-	"github.com/dotcloud/docker/pkg/proxy"
 	"net"
 	"testing"
+
+	"github.com/docker/docker/daemon/networkdriver/portallocator"
+	"github.com/docker/docker/pkg/iptables"
 )
 
 func init() {
 	// override this func to mock out the proxy server
-	newProxy = proxy.NewStubProxy
+	NewProxy = NewMockProxyCommand
 }
 
 func reset() {
@@ -138,7 +138,7 @@
 		}
 
 		if _, err := Map(srcAddr1, dstIp1, portallocator.BeginPortRange); err == nil {
-			t.Fatal("Port %d should be bound but is not", portallocator.BeginPortRange)
+			t.Fatalf("Port %d should be bound but is not", portallocator.BeginPortRange)
 		}
 
 		for _, val := range hosts {
diff --git a/daemon/networkdriver/portmapper/mock_proxy.go b/daemon/networkdriver/portmapper/mock_proxy.go
new file mode 100644
index 0000000..253ce83
--- /dev/null
+++ b/daemon/networkdriver/portmapper/mock_proxy.go
@@ -0,0 +1,18 @@
+package portmapper
+
+import "net"
+
+func NewMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy {
+	return &mockProxyCommand{}
+}
+
+type mockProxyCommand struct {
+}
+
+func (p *mockProxyCommand) Start() error {
+	return nil
+}
+
+func (p *mockProxyCommand) Stop() error {
+	return nil
+}
diff --git a/daemon/networkdriver/portmapper/proxy.go b/daemon/networkdriver/portmapper/proxy.go
new file mode 100644
index 0000000..b247237
--- /dev/null
+++ b/daemon/networkdriver/portmapper/proxy.go
@@ -0,0 +1,119 @@
+package portmapper
+
+import (
+	"flag"
+	"log"
+	"net"
+	"os"
+	"os/exec"
+	"os/signal"
+	"strconv"
+	"syscall"
+
+	"github.com/docker/docker/pkg/proxy"
+	"github.com/docker/docker/reexec"
+)
+
+const userlandProxyCommandName = "docker-proxy"
+
+func init() {
+	reexec.Register(userlandProxyCommandName, execProxy)
+}
+
+type UserlandProxy interface {
+	Start() error
+	Stop() error
+}
+
+// proxyCommand wraps an exec.Cmd to run the userland TCP and UDP
+// proxies as separate processes.
+type proxyCommand struct {
+	cmd *exec.Cmd
+}
+
+// execProxy is the reexec function that is registered to start the userland proxies
+func execProxy() {
+	host, container := parseHostContainerAddrs()
+
+	p, err := proxy.NewProxy(host, container)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	go handleStopSignals(p)
+
+	// Run will block until the proxy stops
+	p.Run()
+}
+
+// parseHostContainerAddrs parses the flags passed on reexec to create the TCP or UDP
+// net.Addrs to map the host and container ports
+func parseHostContainerAddrs() (host net.Addr, container net.Addr) {
+	var (
+		proto         = flag.String("proto", "tcp", "proxy protocol")
+		hostIP        = flag.String("host-ip", "", "host ip")
+		hostPort      = flag.Int("host-port", -1, "host port")
+		containerIP   = flag.String("container-ip", "", "container ip")
+		containerPort = flag.Int("container-port", -1, "container port")
+	)
+
+	flag.Parse()
+
+	switch *proto {
+	case "tcp":
+		host = &net.TCPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort}
+		container = &net.TCPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort}
+	case "udp":
+		host = &net.UDPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort}
+		container = &net.UDPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort}
+	default:
+		log.Fatalf("unsupported protocol %s", *proto)
+	}
+
+	return host, container
+}
+
+func handleStopSignals(p proxy.Proxy) {
+	s := make(chan os.Signal, 10)
+	signal.Notify(s, os.Interrupt, syscall.SIGTERM, syscall.SIGSTOP)
+
+	for _ = range s {
+		p.Close()
+
+		os.Exit(0)
+	}
+}
+
+func NewProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy {
+	args := []string{
+		userlandProxyCommandName,
+		"-proto", proto,
+		"-host-ip", hostIP.String(),
+		"-host-port", strconv.Itoa(hostPort),
+		"-container-ip", containerIP.String(),
+		"-container-port", strconv.Itoa(containerPort),
+	}
+
+	return &proxyCommand{
+		cmd: &exec.Cmd{
+			Path:   reexec.Self(),
+			Args:   args,
+			Stdout: os.Stdout,
+			Stderr: os.Stderr,
+			SysProcAttr: &syscall.SysProcAttr{
+				Pdeathsig: syscall.SIGTERM, // send a sigterm to the proxy if the daemon process dies
+			},
+		},
+	}
+}
+
+func (p *proxyCommand) Start() error {
+	return p.cmd.Start()
+}
+
+func (p *proxyCommand) Stop() error {
+	err := p.cmd.Process.Signal(os.Interrupt)
+	p.cmd.Wait()
+
+	return err
+}
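
For the reexec dispatch above to fire, the daemon's main() has to give reexec first crack at argv[0] before normal startup; a sketch of the expected wiring (assuming the reexec package's usual Init contract):

    func main() {
    	// if os.Args[0] matches a registered name such as "docker-proxy",
    	// reexec.Init runs that handler and returns true
    	if reexec.Init() {
    		return
    	}
    	// ... normal daemon startup ...
    }
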
diff --git a/daemon/pause.go b/daemon/pause.go
new file mode 100644
index 0000000..0e4323d
--- /dev/null
+++ b/daemon/pause.go
@@ -0,0 +1,37 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER", job.Name)
+	}
+	name := job.Args[0]
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+	if err := container.Pause(); err != nil {
+		return job.Errorf("Cannot pause container %s: %s", name, err)
+	}
+	container.LogEvent("pause")
+	return engine.StatusOK
+}
+
+func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status {
+	if n := len(job.Args); n < 1 || n > 2 {
+		return job.Errorf("Usage: %s CONTAINER", job.Name)
+	}
+	name := job.Args[0]
+	container := daemon.Get(name)
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+	if err := container.Unpause(); err != nil {
+		return job.Errorf("Cannot unpause container %s: %s", name, err)
+	}
+	container.LogEvent("unpause")
+	return engine.StatusOK
+}
diff --git a/daemon/resize.go b/daemon/resize.go
new file mode 100644
index 0000000..dd196ff
--- /dev/null
+++ b/daemon/resize.go
@@ -0,0 +1,29 @@
+package daemon
+
+import (
+	"strconv"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
+	if len(job.Args) != 3 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
+	}
+	name := job.Args[0]
+	height, err := strconv.Atoi(job.Args[1])
+	if err != nil {
+		return job.Error(err)
+	}
+	width, err := strconv.Atoi(job.Args[2])
+	if err != nil {
+		return job.Error(err)
+	}
+	if container := daemon.Get(name); container != nil {
+		if err := container.Resize(height, width); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+	return job.Errorf("No such container: %s", name)
+}
diff --git a/daemon/restart.go b/daemon/restart.go
new file mode 100644
index 0000000..bcc0571
--- /dev/null
+++ b/daemon/restart.go
@@ -0,0 +1,27 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+	}
+	var (
+		name = job.Args[0]
+		t    = 10
+	)
+	if job.EnvExists("t") {
+		t = job.GetenvInt("t")
+	}
+	if container := daemon.Get(name); container != nil {
+		if err := container.Restart(int(t)); err != nil {
+			return job.Errorf("Cannot restart container %s: %s\n", name, err)
+		}
+		container.LogEvent("restart")
+	} else {
+		return job.Errorf("No such container: %s\n", name)
+	}
+	return engine.StatusOK
+}
diff --git a/daemon/server.go b/daemon/server.go
deleted file mode 100644
index dbe6a8e..0000000
--- a/daemon/server.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package daemon
-
-import (
-	"github.com/dotcloud/docker/utils"
-)
-
-type Server interface {
-	LogEvent(action, id, from string) *utils.JSONMessage
-	IsRunning() bool // returns true if the server is currently in operation
-}
diff --git a/daemon/start.go b/daemon/start.go
new file mode 100644
index 0000000..30e0154
--- /dev/null
+++ b/daemon/start.go
@@ -0,0 +1,67 @@
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/runconfig"
+)
+
+func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
+	if len(job.Args) < 1 {
+		return job.Errorf("Usage: %s container_id", job.Name)
+	}
+	var (
+		name      = job.Args[0]
+		container = daemon.Get(name)
+	)
+
+	if container == nil {
+		return job.Errorf("No such container: %s", name)
+	}
+
+	if container.State.IsRunning() {
+		return job.Errorf("Container already started")
+	}
+
+	// If no environment was set, then no hostconfig was passed.
+	if len(job.Environ()) > 0 {
+		hostConfig := runconfig.ContainerHostConfigFromJob(job)
+		if err := daemon.setHostConfig(container, hostConfig); err != nil {
+			return job.Error(err)
+		}
+	}
+	if err := container.Start(); err != nil {
+		return job.Errorf("Cannot start container %s: %s", name, err)
+	}
+
+	return engine.StatusOK
+}
+
+func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
+	// Validate the HostConfig binds: make sure that the source exists on the host
+	for _, bind := range hostConfig.Binds {
+		splitBind := strings.Split(bind, ":")
+		source := splitBind[0]
+
+		// ensure the source exists on the host
+		_, err := os.Stat(source)
+		if err != nil && os.IsNotExist(err) {
+			err = os.MkdirAll(source, 0755)
+			if err != nil {
+				return fmt.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
+			}
+		}
+	}
+	// Register any links from the host config before starting the container
+	if err := daemon.RegisterLinks(container, hostConfig); err != nil {
+		return err
+	}
+	container.SetHostConfig(hostConfig)
+	container.ToDisk()
+
+	return nil
+}
diff --git a/daemon/state.go b/daemon/state.go
index 3f904d7..44742b7 100644
--- a/daemon/state.go
+++ b/daemon/state.go
@@ -1,17 +1,19 @@
 package daemon
 
 import (
+	"encoding/json"
 	"fmt"
 	"sync"
 	"time"
 
-	"github.com/dotcloud/docker/pkg/units"
+	"github.com/docker/docker/pkg/units"
 )
 
 type State struct {
 	sync.RWMutex
 	Running    bool
 	Paused     bool
+	Restarting bool
 	Pid        int
 	ExitCode   int
 	StartedAt  time.Time
@@ -34,14 +36,30 @@
 		if s.Paused {
 			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
 		}
+		if s.Restarting {
+			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+		}
+
 		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
 	}
+
 	if s.FinishedAt.IsZero() {
 		return ""
 	}
+
 	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
 }
 
+type jState State
+
+// MarshalJSON for state is needed to avoid race conditions on inspect
+func (s *State) MarshalJSON() ([]byte, error) {
+	s.RLock()
+	b, err := json.Marshal(jState(*s))
+	s.RUnlock()
+	return b, err
+}
+
 func wait(waitChan <-chan struct{}, timeout time.Duration) error {
 	if timeout < 0 {
 		<-waitChan
@@ -114,31 +132,52 @@
 
 func (s *State) SetRunning(pid int) {
 	s.Lock()
-	if !s.Running {
-		s.Running = true
-		s.Paused = false
-		s.ExitCode = 0
-		s.Pid = pid
-		s.StartedAt = time.Now().UTC()
-		close(s.waitChan) // fire waiters for start
-		s.waitChan = make(chan struct{})
-	}
+	s.Running = true
+	s.Paused = false
+	s.Restarting = false
+	s.ExitCode = 0
+	s.Pid = pid
+	s.StartedAt = time.Now().UTC()
+	close(s.waitChan) // fire waiters for start
+	s.waitChan = make(chan struct{})
 	s.Unlock()
 }
 
 func (s *State) SetStopped(exitCode int) {
 	s.Lock()
-	if s.Running {
-		s.Running = false
-		s.Pid = 0
-		s.FinishedAt = time.Now().UTC()
-		s.ExitCode = exitCode
-		close(s.waitChan) // fire waiters for stop
-		s.waitChan = make(chan struct{})
-	}
+	s.Running = false
+	s.Restarting = false
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.ExitCode = exitCode
+	close(s.waitChan) // fire waiters for stop
+	s.waitChan = make(chan struct{})
 	s.Unlock()
 }
 
+// SetRestarting is called when docker handles the auto-restart of containers that are
+// in the middle of a stop and about to be restarted again
+func (s *State) SetRestarting(exitCode int) {
+	s.Lock()
+	// we should consider the container as running while it is restarting because of
+	// all the checks in docker around rm/stop/etc.
+	s.Running = true
+	s.Restarting = true
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.ExitCode = exitCode
+	close(s.waitChan) // fire waiters for stop
+	s.waitChan = make(chan struct{})
+	s.Unlock()
+}
+
+func (s *State) IsRestarting() bool {
+	s.RLock()
+	res := s.Restarting
+	s.RUnlock()
+	return res
+}
+
 func (s *State) SetPaused() {
 	s.Lock()
 	s.Paused = true
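
The jState alias above is the standard trick for locking inside MarshalJSON without recursing: converting to the alias type drops State's methods, so json.Marshal falls back to plain struct encoding. A self-contained sketch of the pattern, using a toy Counter type that is not from this change:

package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

type Counter struct {
	sync.RWMutex
	N int
}

// jCounter has Counter's fields but none of its methods, so the default
// struct encoding applies and MarshalJSON below does not call itself.
type jCounter Counter

func (c *Counter) MarshalJSON() ([]byte, error) {
	c.RLock()
	// Note: this copies the embedded mutex (go vet flags it), exactly as
	// jState(*s) does above; the copy is only marshaled, never locked.
	b, err := json.Marshal(jCounter(*c))
	c.RUnlock()
	return b, err
}

func main() {
	b, _ := json.Marshal(&Counter{N: 42})
	fmt.Println(string(b)) // {"N":42}
}
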
diff --git a/daemon/state_test.go b/daemon/state_test.go
index 7b02f3a..3552435 100644
--- a/daemon/state_test.go
+++ b/daemon/state_test.go
@@ -37,7 +37,7 @@
 			t.Fatalf("Pid %v, expected %v", runPid, i+100)
 		}
 		if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 {
-			t.Fatal("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
+			t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
 		}
 
 		stopped := make(chan struct{})
@@ -68,7 +68,7 @@
 			t.Fatalf("ExitCode %v, expected %v", exitCode, i)
 		}
 		if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i {
-			t.Fatal("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil)
+			t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil)
 		}
 	}
 }
diff --git a/daemon/stop.go b/daemon/stop.go
new file mode 100644
index 0000000..f185129
--- /dev/null
+++ b/daemon/stop.go
@@ -0,0 +1,30 @@
+package daemon
+
+import (
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+	}
+	var (
+		name = job.Args[0]
+		t    = 10
+	)
+	if job.EnvExists("t") {
+		t = job.GetenvInt("t")
+	}
+	if container := daemon.Get(name); container != nil {
+		if !container.State.IsRunning() {
+			return job.Errorf("Container already stopped")
+		}
+		if err := container.Stop(int(t)); err != nil {
+			return job.Errorf("Cannot stop container %s: %s\n", name, err)
+		}
+		container.LogEvent("stop")
+	} else {
+		return job.Errorf("No such container: %s\n", name)
+	}
+	return engine.StatusOK
+}
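
For reference, the "t" environment key is how callers pass the stop grace period. A hedged sketch of driving such a job through the engine, with a stand-in handler registered locally (the real handler is installed by the daemon elsewhere; the handler body here is illustrative):

package main

import (
	"log"

	"github.com/docker/docker/engine"
)

func main() {
	eng := engine.New()
	// Stand-in for the real "stop" handler the daemon installs.
	eng.Register("stop", func(job *engine.Job) engine.Status {
		t := 10 // default grace period, as in ContainerStop above
		if job.EnvExists("t") {
			t = job.GetenvInt("t")
		}
		log.Printf("would stop %s with a %d second grace period", job.Args[0], t)
		return engine.StatusOK
	})

	job := eng.Job("stop", "my-container")
	job.SetenvInt("t", 30)
	if err := job.Run(); err != nil {
		log.Fatal(err)
	}
}
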
diff --git a/daemon/top.go b/daemon/top.go
new file mode 100644
index 0000000..ceaeea1
--- /dev/null
+++ b/daemon/top.go
@@ -0,0 +1,79 @@
+package daemon
+
+import (
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 && len(job.Args) != 2 {
+		return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
+	}
+	var (
+		name   = job.Args[0]
+		psArgs = "-ef"
+	)
+
+	if len(job.Args) == 2 && job.Args[1] != "" {
+		psArgs = job.Args[1]
+	}
+
+	if container := daemon.Get(name); container != nil {
+		if !container.State.IsRunning() {
+			return job.Errorf("Container %s is not running", name)
+		}
+		pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID)
+		if err != nil {
+			return job.Error(err)
+		}
+		output, err := exec.Command("ps", psArgs).Output()
+		if err != nil {
+			return job.Errorf("Error running ps: %s", err)
+		}
+
+		lines := strings.Split(string(output), "\n")
+		header := strings.Fields(lines[0])
+		out := &engine.Env{}
+		out.SetList("Titles", header)
+
+		pidIndex := -1
+		for i, name := range header {
+			if name == "PID" {
+				pidIndex = i
+			}
+		}
+		if pidIndex == -1 {
+			return job.Errorf("Couldn't find PID field in ps output")
+		}
+
+		processes := [][]string{}
+		for _, line := range lines[1:] {
+			if len(line) == 0 {
+				continue
+			}
+			fields := strings.Fields(line)
+			p, err := strconv.Atoi(fields[pidIndex])
+			if err != nil {
+				return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
+			}
+
+			for _, pid := range pids {
+				if pid == p {
+					// Make sure the number of fields equals the number of header titles
+					// by merging any "overhanging" fields into the last column
+					process := fields[:len(header)-1]
+					process = append(process, strings.Join(fields[len(header)-1:], " "))
+					processes = append(processes, process)
+				}
+			}
+		}
+		out.SetJson("Processes", processes)
+		out.WriteTo(job.Stdout)
+		return engine.StatusOK
+
+	}
+	return job.Errorf("No such container: %s", name)
+}
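
The field merge near the end of ContainerTop deserves a note: ps output is whitespace-split, so the last column (CMD) may itself contain spaces, and everything past the final header column is glued back together. A standalone sketch of just that step:

package main

import (
	"fmt"
	"strings"
)

func main() {
	header := strings.Fields("UID PID PPID C STIME TTY TIME CMD")
	line := "root 1 0 0 10:00 ? 00:00:01 /bin/sh -c sleep 1000"
	fields := strings.Fields(line)
	// Everything past the last header column belongs to CMD; join it back.
	process := fields[:len(header)-1]
	process = append(process, strings.Join(fields[len(header)-1:], " "))
	fmt.Println(process) // [root 1 0 0 10:00 ? 00:00:01 /bin/sh -c sleep 1000]
}
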
diff --git a/daemon/utils.go b/daemon/utils.go
index d60d985..053319c 100644
--- a/daemon/utils.go
+++ b/daemon/utils.go
@@ -4,8 +4,8 @@
 	"fmt"
 	"strings"
 
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/runconfig"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/runconfig"
 )
 
 func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error {
diff --git a/daemon/utils_linux.go b/daemon/utils_linux.go
new file mode 100644
index 0000000..bff2a78
--- /dev/null
+++ b/daemon/utils_linux.go
@@ -0,0 +1,13 @@
+// +build linux
+
+package daemon
+
+import "github.com/docker/libcontainer/selinux"
+
+func selinuxSetDisabled() {
+	selinux.SetDisabled()
+}
+
+func selinuxFreeLxcContexts(label string) {
+	selinux.FreeLxcContexts(label)
+}
diff --git a/daemon/utils_nolinux.go b/daemon/utils_nolinux.go
new file mode 100644
index 0000000..399376d
--- /dev/null
+++ b/daemon/utils_nolinux.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package daemon
+
+func selinuxSetDisabled() {
+}
+
+func selinuxFreeLxcContexts(label string) {
+}
diff --git a/daemon/utils_test.go b/daemon/utils_test.go
index 22b52d1..1f3175b 100644
--- a/daemon/utils_test.go
+++ b/daemon/utils_test.go
@@ -3,8 +3,8 @@
 import (
 	"testing"
 
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 func TestMergeLxcConfig(t *testing.T) {
diff --git a/daemon/volumes.go b/daemon/volumes.go
index f4b3921..b60118c 100644
--- a/daemon/volumes.go
+++ b/daemon/volumes.go
@@ -8,15 +8,29 @@
 	"strings"
 	"syscall"
 
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	"github.com/dotcloud/docker/pkg/symlink"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/symlink"
 )
 
-type BindMap struct {
-	SrcPath string
-	DstPath string
-	Mode    string
+type Volume struct {
+	HostPath    string
+	VolPath     string
+	Mode        string
+	isBindMount bool
+}
+
+func (v *Volume) isRw() bool {
+	return v.Mode == "" || strings.ToLower(v.Mode) == "rw"
+}
+
+func (v *Volume) isDir() (bool, error) {
+	stat, err := os.Stat(v.HostPath)
+	if err != nil {
+		return false, err
+	}
+
+	return stat.IsDir(), nil
 }
 
 func prepareVolumesForContainer(container *Container) error {
@@ -36,16 +50,15 @@
 
 func setupMountsForContainer(container *Container) error {
 	mounts := []execdriver.Mount{
-		{container.daemon.sysInitPath, "/.dockerinit", false, true},
-		{container.ResolvConfPath, "/etc/resolv.conf", false, true},
+		{container.ResolvConfPath, "/etc/resolv.conf", true, true},
 	}
 
 	if container.HostnamePath != "" {
-		mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", false, true})
+		mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", true, true})
 	}
 
 	if container.HostsPath != "" {
-		mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", false, true})
+		mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", true, true})
 	}
 
 	// Mount user specified volumes
@@ -123,181 +136,175 @@
 	return nil
 }
 
-func getBindMap(container *Container) (map[string]BindMap, error) {
+func parseBindVolumeSpec(spec string) (Volume, error) {
+	var (
+		arr = strings.Split(spec, ":")
+		vol Volume
+	)
+
+	vol.isBindMount = true
+	switch len(arr) {
+	case 1:
+		vol.VolPath = spec
+		vol.Mode = "rw"
+	case 2:
+		vol.HostPath = arr[0]
+		vol.VolPath = arr[1]
+		vol.Mode = "rw"
+	case 3:
+		vol.HostPath = arr[0]
+		vol.VolPath = arr[1]
+		vol.Mode = arr[2]
+	default:
+		return vol, fmt.Errorf("Invalid volume specification: %s", spec)
+	}
+
+	if !filepath.IsAbs(vol.HostPath) {
+		return vol, fmt.Errorf("cannot bind mount volume: %s, volume paths must be absolute", vol.HostPath)
+	}
+
+	return vol, nil
+}
+
+func getBindMap(container *Container) (map[string]Volume, error) {
 	var (
 		// Create the requested bind mounts
-		binds = make(map[string]BindMap)
+		volumes = map[string]Volume{}
 		// Define illegal container destinations
 		illegalDsts = []string{"/", "."}
 	)
 
 	for _, bind := range container.hostConfig.Binds {
-		// FIXME: factorize bind parsing in parseBind
-		var (
-			src, dst, mode string
-			arr            = strings.Split(bind, ":")
-		)
-
-		if len(arr) == 2 {
-			src = arr[0]
-			dst = arr[1]
-			mode = "rw"
-		} else if len(arr) == 3 {
-			src = arr[0]
-			dst = arr[1]
-			mode = arr[2]
-		} else {
-			return nil, fmt.Errorf("Invalid bind specification: %s", bind)
+		vol, err := parseBindVolumeSpec(bind)
+		if err != nil {
+			return volumes, err
 		}
-
 		// Bail if trying to mount to an illegal destination
 		for _, illegal := range illegalDsts {
-			if dst == illegal {
-				return nil, fmt.Errorf("Illegal bind destination: %s", dst)
+			if vol.VolPath == illegal {
+				return nil, fmt.Errorf("Illegal bind destination: %s", vol.VolPath)
 			}
 		}
 
-		bindMap := BindMap{
-			SrcPath: src,
-			DstPath: dst,
-			Mode:    mode,
-		}
-		binds[filepath.Clean(dst)] = bindMap
+		volumes[filepath.Clean(vol.VolPath)] = vol
 	}
-	return binds, nil
+	return volumes, nil
 }
 
 func createVolumes(container *Container) error {
-	binds, err := getBindMap(container)
+	// Get all the bindmounts
+	volumes, err := getBindMap(container)
 	if err != nil {
 		return err
 	}
 
-	// Create the requested volumes if they don't exist
+	// Get all the rest of the volumes
 	for volPath := range container.Config.Volumes {
-		if err := initializeVolume(container, volPath, binds); err != nil {
+		// Make sure the volume isn't already specified as a bindmount
+		if _, exists := volumes[volPath]; !exists {
+			volumes[volPath] = Volume{
+				VolPath:     volPath,
+				Mode:        "rw",
+				isBindMount: false,
+			}
+		}
+	}
+
+	for _, vol := range volumes {
+		if err = vol.initialize(container); err != nil {
+			return err
+		}
+	}
+	return nil
+
+}
+
+func createVolumeHostPath(container *Container) (string, error) {
+	volumesDriver := container.daemon.volumes.Driver()
+
+	// Do not pass a container as the parameter for the volume creation.
+	// The graph driver would use the container's information (Image) to
+	// create the parent.
+	c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
+	if err != nil {
+		return "", err
+	}
+	hostPath, err := volumesDriver.Get(c.ID, "")
+	if err != nil {
+		return hostPath, fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
+	}
+
+	return hostPath, nil
+}
+
+func (v *Volume) initialize(container *Container) error {
+	var err error
+	v.VolPath = filepath.Clean(v.VolPath)
+
+	// Do not initialize an existing volume
+	if _, exists := container.Volumes[v.VolPath]; exists {
+		return nil
+	}
+
+	// If it's not a bindmount we need to create the dir on the host
+	if !v.isBindMount {
+		v.HostPath, err = createVolumeHostPath(container)
+		if err != nil {
 			return err
 		}
 	}
 
-	for volPath := range binds {
-		if err := initializeVolume(container, volPath, binds); err != nil {
-			return err
-		}
+	hostPath, err := filepath.EvalSymlinks(v.HostPath)
+	if err != nil {
+		return err
+	}
+
+	// Create the mountpoint
+	// This is the path to the volume within the container FS
+	// This differs from `hostPath` in that `hostPath` refers to the place where
+	// the volume data is actually stored on the host
+	fullVolPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, v.VolPath), container.basefs)
+	if err != nil {
+		return err
+	}
+
+	container.Volumes[v.VolPath] = hostPath
+	container.VolumesRW[v.VolPath] = v.isRw()
+
+	volIsDir, err := v.isDir()
+	if err != nil {
+		return err
+	}
+	if err := createIfNotExists(fullVolPath, volIsDir); err != nil {
+		return err
+	}
+
+	// Do not copy or change permissions if we are mounting from the host
+	if v.isRw() && !v.isBindMount {
+		return copyExistingContents(fullVolPath, hostPath)
 	}
 	return nil
 }
 
 func createIfNotExists(destination string, isDir bool) error {
-	if _, err := os.Stat(destination); err != nil && os.IsNotExist(err) {
-		if isDir {
-			if err := os.MkdirAll(destination, 0755); err != nil {
-				return err
-			}
-		} else {
-			if err := os.MkdirAll(filepath.Dir(destination), 0755); err != nil {
-				return err
-			}
-
-			f, err := os.OpenFile(destination, os.O_CREATE, 0755)
-			if err != nil {
-				return err
-			}
-			f.Close()
-		}
-	}
-
-	return nil
-}
-
-func initializeVolume(container *Container, volPath string, binds map[string]BindMap) error {
-	volumesDriver := container.daemon.volumes.Driver()
-	volPath = filepath.Clean(volPath)
-
-	// Skip existing volumes
-	if _, exists := container.Volumes[volPath]; exists {
+	if _, err := os.Stat(destination); err == nil || !os.IsNotExist(err) {
 		return nil
 	}
 
-	var (
-		destination string
-		isBindMount bool
-		volIsDir    = true
-
-		srcRW = false
-	)
-
-	// If an external bind is defined for this volume, use that as a source
-	if bindMap, exists := binds[volPath]; exists {
-		isBindMount = true
-		destination = bindMap.SrcPath
-
-		if !filepath.IsAbs(destination) {
-			return fmt.Errorf("%s must be an absolute path", destination)
-		}
-
-		if strings.ToLower(bindMap.Mode) == "rw" {
-			srcRW = true
-		}
-
-		if stat, err := os.Stat(bindMap.SrcPath); err != nil {
-			return err
-		} else {
-			volIsDir = stat.IsDir()
-		}
-	} else {
-		// Do not pass a container as the parameter for the volume creation.
-		// The graph driver using the container's information ( Image ) to
-		// create the parent.
-		c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
-		if err != nil {
-			return err
-		}
-
-		destination, err = volumesDriver.Get(c.ID, "")
-		if err != nil {
-			return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
-		}
-
-		srcRW = true
+	if isDir {
+		return os.MkdirAll(destination, 0755)
 	}
 
-	if p, err := filepath.EvalSymlinks(destination); err != nil {
+	if err := os.MkdirAll(filepath.Dir(destination), 0755); err != nil {
 		return err
-	} else {
-		destination = p
 	}
 
-	// Create the mountpoint
-	source, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs)
+	f, err := os.OpenFile(destination, os.O_CREATE, 0755)
 	if err != nil {
 		return err
 	}
+	f.Close()
 
-	newVolPath, err := filepath.Rel(container.basefs, source)
-	if err != nil {
-		return err
-	}
-	newVolPath = "/" + newVolPath
-
-	if volPath != newVolPath {
-		delete(container.Volumes, volPath)
-		delete(container.VolumesRW, volPath)
-	}
-
-	container.Volumes[volPath] = destination
-	container.VolumesRW[volPath] = srcRW
-
-	if err := createIfNotExists(source, volIsDir); err != nil {
-		return err
-	}
-
-	// Do not copy or change permissions if we are mounting from the host
-	if srcRW && !isBindMount {
-		if err := copyExistingContents(source, destination); err != nil {
-			return err
-		}
-	}
 	return nil
 }
 
diff --git a/daemon/wait.go b/daemon/wait.go
new file mode 100644
index 0000000..7224b62
--- /dev/null
+++ b/daemon/wait.go
@@ -0,0 +1,20 @@
+package daemon
+
+import (
+	"time"
+
+	"github.com/docker/docker/engine"
+)
+
+func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s CONTAINER", job.Name)
+	}
+	name := job.Args[0]
+	if container := daemon.Get(name); container != nil {
+		status, _ := container.State.WaitStop(-1 * time.Second)
+		job.Printf("%d\n", status)
+		return engine.StatusOK
+	}
+	return job.Errorf("%s: no such container: %s", job.Name, name)
+}
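
ContainerWait passes -1s to WaitStop, which per the wait helper in daemon/state.go means block until the container stops. A sketch of that helper's shape; only the blocking branch is visible in the hunk above, so the timeout branch here is an assumption based on the negative-means-forever convention:

package main

import (
	"fmt"
	"time"
)

// Same shape as daemon/state.go's wait helper: a negative timeout blocks
// until the channel closes; otherwise give up after the duration.
func wait(ch <-chan struct{}, timeout time.Duration) error {
	if timeout < 0 {
		<-ch
		return nil
	}
	select {
	case <-ch:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("timed out")
	}
}

func main() {
	ch := make(chan struct{})
	go func() { time.Sleep(50 * time.Millisecond); close(ch) }()
	fmt.Println(wait(ch, -1*time.Second)) // blocks ~50ms, prints <nil>
}
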
diff --git a/daemonconfig/README.md b/daemonconfig/README.md
deleted file mode 100644
index 488e7c7..0000000
--- a/daemonconfig/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-This directory contains code pertaining to the configuration of the docker deamon
-
-These are the configuration settings that you pass to the docker daemon when you launch it with say: `docker -d -e lxc`
diff --git a/daemonconfig/config.go b/daemonconfig/config.go
deleted file mode 100644
index 1d2bb60..0000000
--- a/daemonconfig/config.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package daemonconfig
-
-import (
-	"github.com/dotcloud/docker/daemon/networkdriver"
-	"github.com/dotcloud/docker/engine"
-	"net"
-)
-
-const (
-	defaultNetworkMtu    = 1500
-	DisableNetworkBridge = "none"
-)
-
-// FIXME: separate runtime configuration from http api configuration
-type Config struct {
-	Pidfile                     string
-	Root                        string
-	AutoRestart                 bool
-	Dns                         []string
-	DnsSearch                   []string
-	EnableIptables              bool
-	EnableIpForward             bool
-	DefaultIp                   net.IP
-	BridgeIface                 string
-	BridgeIP                    string
-	InterContainerCommunication bool
-	GraphDriver                 string
-	GraphOptions                []string
-	ExecDriver                  string
-	Mtu                         int
-	DisableNetwork              bool
-	EnableSelinuxSupport        bool
-	Context                     map[string][]string
-	Sockets                     []string
-}
-
-// ConfigFromJob creates and returns a new DaemonConfig object
-// by parsing the contents of a job's environment.
-func ConfigFromJob(job *engine.Job) *Config {
-	config := &Config{
-		Pidfile:                     job.Getenv("Pidfile"),
-		Root:                        job.Getenv("Root"),
-		AutoRestart:                 job.GetenvBool("AutoRestart"),
-		EnableIptables:              job.GetenvBool("EnableIptables"),
-		EnableIpForward:             job.GetenvBool("EnableIpForward"),
-		BridgeIP:                    job.Getenv("BridgeIP"),
-		BridgeIface:                 job.Getenv("BridgeIface"),
-		DefaultIp:                   net.ParseIP(job.Getenv("DefaultIp")),
-		InterContainerCommunication: job.GetenvBool("InterContainerCommunication"),
-		GraphDriver:                 job.Getenv("GraphDriver"),
-		ExecDriver:                  job.Getenv("ExecDriver"),
-		EnableSelinuxSupport:        job.GetenvBool("EnableSelinuxSupport"),
-	}
-	if graphOpts := job.GetenvList("GraphOptions"); graphOpts != nil {
-		config.GraphOptions = graphOpts
-	}
-
-	if dns := job.GetenvList("Dns"); dns != nil {
-		config.Dns = dns
-	}
-	if dnsSearch := job.GetenvList("DnsSearch"); dnsSearch != nil {
-		config.DnsSearch = dnsSearch
-	}
-	if mtu := job.GetenvInt("Mtu"); mtu != 0 {
-		config.Mtu = mtu
-	} else {
-		config.Mtu = GetDefaultNetworkMtu()
-	}
-	config.DisableNetwork = config.BridgeIface == DisableNetworkBridge
-	if sockets := job.GetenvList("Sockets"); sockets != nil {
-		config.Sockets = sockets
-	}
-
-	return config
-}
-
-func GetDefaultNetworkMtu() int {
-	if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
-		return iface.MTU
-	}
-	return defaultNetworkMtu
-}
diff --git a/docker/client.go b/docker/client.go
new file mode 100644
index 0000000..27001cc
--- /dev/null
+++ b/docker/client.go
@@ -0,0 +1,13 @@
+// +build !daemon
+
+package main
+
+import (
+	"log"
+)
+
+const CanDaemon = false
+
+func mainDaemon() {
+	log.Fatal("This is a client-only binary - running the Docker daemon is not supported.")
+}
diff --git a/docker/daemon.go b/docker/daemon.go
new file mode 100644
index 0000000..dc9d56d
--- /dev/null
+++ b/docker/daemon.go
@@ -0,0 +1,81 @@
+// +build daemon
+
+package main
+
+import (
+	"log"
+
+	"github.com/docker/docker/builtins"
+	"github.com/docker/docker/daemon"
+	_ "github.com/docker/docker/daemon/execdriver/lxc"
+	_ "github.com/docker/docker/daemon/execdriver/native"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/engine"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/signal"
+)
+
+const CanDaemon = true
+
+var (
+	daemonCfg = &daemon.Config{}
+)
+
+func init() {
+	daemonCfg.InstallFlags()
+}
+
+func mainDaemon() {
+	if flag.NArg() != 0 {
+		flag.Usage()
+		return
+	}
+	eng := engine.New()
+	signal.Trap(eng.Shutdown)
+	// Load builtins
+	if err := builtins.Register(eng); err != nil {
+		log.Fatal(err)
+	}
+
+	// load the daemon in the background so we can immediately start
+	// the http api so that connections don't fail while the daemon
+	// is booting
+	go func() {
+		d, err := daemon.NewDaemon(daemonCfg, eng)
+		if err != nil {
+			log.Fatal(err)
+		}
+		if err := d.Install(eng); err != nil {
+			log.Fatal(err)
+		}
+		// after the daemon is done setting up we can tell the api to start
+		// accepting connections
+		if err := eng.Job("acceptconnections").Run(); err != nil {
+			log.Fatal(err)
+		}
+	}()
+	// TODO actually have a resolved graphdriver to show?
+	log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
+		dockerversion.VERSION,
+		dockerversion.GITCOMMIT,
+		daemonCfg.ExecDriver,
+		daemonCfg.GraphDriver,
+	)
+
+	// Serve api
+	job := eng.Job("serveapi", flHosts...)
+	job.SetenvBool("Logging", true)
+	job.SetenvBool("EnableCors", *flEnableCors)
+	job.Setenv("Version", dockerversion.VERSION)
+	job.Setenv("SocketGroup", *flSocketGroup)
+
+	job.SetenvBool("Tls", *flTls)
+	job.SetenvBool("TlsVerify", *flTlsVerify)
+	job.Setenv("TlsCa", *flCa)
+	job.Setenv("TlsCert", *flCert)
+	job.Setenv("TlsKey", *flKey)
+	job.SetenvBool("BufferRequests", true)
+	if err := job.Run(); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/docker/docker.go b/docker/docker.go
index 30d43bc..f2b4ca9 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -6,20 +6,15 @@
 	"fmt"
 	"io/ioutil"
 	"log"
-	"net"
 	"os"
-	"runtime"
 	"strings"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/api/client"
-	"github.com/dotcloud/docker/builtins"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/opts"
-	flag "github.com/dotcloud/docker/pkg/mflag"
-	"github.com/dotcloud/docker/sysinit"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/client"
+	"github.com/docker/docker/dockerversion"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/reexec"
+	"github.com/docker/docker/utils"
 )
 
 const (
@@ -28,60 +23,23 @@
 	defaultCertFile = "cert.pem"
 )
 
-var (
-	dockerConfDir = os.Getenv("HOME") + "/.docker/"
-)
-
 func main() {
-	if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") {
-		// Running in init mode
-		sysinit.SysInit()
+	if reexec.Init() {
 		return
 	}
-
-	var (
-		flVersion            = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
-		flDaemon             = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
-		flGraphOpts          opts.ListOpts
-		flDebug              = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
-		flAutoRestart        = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers")
-		bridgeName           = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
-		bridgeIp             = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
-		pidfile              = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
-		flRoot               = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
-		flSocketGroup        = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
-		flEnableCors         = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
-		flDns                = opts.NewListOpts(opts.ValidateIp4Address)
-		flDnsSearch          = opts.NewListOpts(opts.ValidateDomain)
-		flEnableIptables     = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
-		flEnableIpForward    = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
-		flDefaultIp          = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
-		flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
-		flGraphDriver        = flag.String([]string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
-		flExecDriver         = flag.String([]string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
-		flHosts              = opts.NewListOpts(api.ValidateHost)
-		flMtu                = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
-		flTls                = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
-		flTlsVerify          = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
-		flCa                 = flag.String([]string{"-tlscacert"}, dockerConfDir+defaultCaFile, "Trust only remotes providing a certificate signed by the CA given here")
-		flCert               = flag.String([]string{"-tlscert"}, dockerConfDir+defaultCertFile, "Path to TLS certificate file")
-		flKey                = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file")
-		flSelinuxEnabled     = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support")
-	)
-	flag.Var(&flDns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
-	flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
-	flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
-	flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options")
-
 	flag.Parse()
+	// FIXME: validate daemon flags here
 
 	if *flVersion {
 		showVersion()
 		return
 	}
-	if flHosts.Len() == 0 {
-		defaultHost := os.Getenv("DOCKER_HOST")
+	if *flDebug {
+		os.Setenv("DEBUG", "1")
+	}
 
+	if len(flHosts) == 0 {
+		defaultHost := os.Getenv("DOCKER_HOST")
 		if defaultHost == "" || *flDaemon {
 			// If we do not have a host, default to unix socket
 			defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
@@ -89,203 +47,69 @@
 		if _, err := api.ValidateHost(defaultHost); err != nil {
 			log.Fatal(err)
 		}
-		flHosts.Set(defaultHost)
-	}
-
-	if *bridgeName != "" && *bridgeIp != "" {
-		log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
-	}
-
-	if !*flEnableIptables && !*flInterContainerComm {
-		log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
-	}
-
-	if net.ParseIP(*flDefaultIp) == nil {
-		log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp)
-	}
-
-	if *flDebug {
-		os.Setenv("DEBUG", "1")
+		flHosts = append(flHosts, defaultHost)
 	}
 
 	if *flDaemon {
-		if runtime.GOOS != "linux" {
-			log.Fatalf("The Docker daemon is only supported on linux")
-		}
-		if os.Geteuid() != 0 {
-			log.Fatalf("The Docker daemon needs to be run as root")
-		}
+		mainDaemon()
+		return
+	}
 
-		if flag.NArg() != 0 {
-			flag.Usage()
-			return
-		}
+	if len(flHosts) > 1 {
+		log.Fatal("Please specify only one -H")
+	}
+	protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
 
-		// set up the TempDir to use a canonical path
-		tmp := os.TempDir()
-		realTmp, err := utils.ReadSymlinkedDirectory(tmp)
+	var (
+		cli       *client.DockerCli
+		tlsConfig tls.Config
+	)
+	tlsConfig.InsecureSkipVerify = true
+
+	// If we should verify the server, we need to load a trusted ca
+	if *flTlsVerify {
+		*flTls = true
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(*flCa)
 		if err != nil {
-			log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
+			log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
 		}
-		os.Setenv("TMPDIR", realTmp)
+		certPool.AppendCertsFromPEM(file)
+		tlsConfig.RootCAs = certPool
+		tlsConfig.InsecureSkipVerify = false
+	}
 
-		// get the canonical path to the Docker root directory
-		root := *flRoot
-		var realRoot string
-		if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
-			realRoot = root
-		} else {
-			realRoot, err = utils.ReadSymlinkedDirectory(root)
-			if err != nil {
-				log.Fatalf("Unable to get the full path to root (%s): %s", root, err)
-			}
-		}
-		if err := checkKernelAndArch(); err != nil {
-			log.Fatal(err)
-		}
-
-		eng := engine.New()
-		// Load builtins
-		if err := builtins.Register(eng); err != nil {
-			log.Fatal(err)
-		}
-		// load the daemon in the background so we can immediately start
-		// the http api so that connections don't fail while the daemon
-		// is booting
-		go func() {
-			// Load plugin: httpapi
-			job := eng.Job("initserver")
-			job.Setenv("Pidfile", *pidfile)
-			job.Setenv("Root", realRoot)
-			job.SetenvBool("AutoRestart", *flAutoRestart)
-			job.SetenvList("Dns", flDns.GetAll())
-			job.SetenvList("DnsSearch", flDnsSearch.GetAll())
-			job.SetenvBool("EnableIptables", *flEnableIptables)
-			job.SetenvBool("EnableIpForward", *flEnableIpForward)
-			job.Setenv("BridgeIface", *bridgeName)
-			job.Setenv("BridgeIP", *bridgeIp)
-			job.Setenv("DefaultIp", *flDefaultIp)
-			job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
-			job.Setenv("GraphDriver", *flGraphDriver)
-			job.SetenvList("GraphOptions", flGraphOpts.GetAll())
-			job.Setenv("ExecDriver", *flExecDriver)
-			job.SetenvInt("Mtu", *flMtu)
-			job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
-			job.SetenvList("Sockets", flHosts.GetAll())
-			if err := job.Run(); err != nil {
-				log.Fatal(err)
-			}
-			// after the daemon is done setting up we can tell the api to start
-			// accepting connections
-			if err := eng.Job("acceptconnections").Run(); err != nil {
-				log.Fatal(err)
-			}
-		}()
-
-		// TODO actually have a resolved graphdriver to show?
-		log.Printf("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
-			dockerversion.VERSION,
-			dockerversion.GITCOMMIT,
-			*flExecDriver,
-			*flGraphDriver)
-
-		// Serve api
-		job := eng.Job("serveapi", flHosts.GetAll()...)
-		job.SetenvBool("Logging", true)
-		job.SetenvBool("EnableCors", *flEnableCors)
-		job.Setenv("Version", dockerversion.VERSION)
-		job.Setenv("SocketGroup", *flSocketGroup)
-
-		job.SetenvBool("Tls", *flTls)
-		job.SetenvBool("TlsVerify", *flTlsVerify)
-		job.Setenv("TlsCa", *flCa)
-		job.Setenv("TlsCert", *flCert)
-		job.Setenv("TlsKey", *flKey)
-		job.SetenvBool("BufferRequests", true)
-		if err := job.Run(); err != nil {
-			log.Fatal(err)
-		}
-	} else {
-		if flHosts.Len() > 1 {
-			log.Fatal("Please specify only one -H")
-		}
-		protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)
-
-		var (
-			cli       *client.DockerCli
-			tlsConfig tls.Config
-		)
-		tlsConfig.InsecureSkipVerify = true
-
-		// If we should verify the server, we need to load a trusted ca
-		if *flTlsVerify {
+	// If tls is enabled, try to load and send client certificates
+	if *flTls || *flTlsVerify {
+		_, errCert := os.Stat(*flCert)
+		_, errKey := os.Stat(*flKey)
+		if errCert == nil && errKey == nil {
 			*flTls = true
-			certPool := x509.NewCertPool()
-			file, err := ioutil.ReadFile(*flCa)
+			cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
 			if err != nil {
-				log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
+				log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
 			}
-			certPool.AppendCertsFromPEM(file)
-			tlsConfig.RootCAs = certPool
-			tlsConfig.InsecureSkipVerify = false
+			tlsConfig.Certificates = []tls.Certificate{cert}
 		}
+	}
 
-		// If tls is enabled, try to load and send client certificates
-		if *flTls || *flTlsVerify {
-			_, errCert := os.Stat(*flCert)
-			_, errKey := os.Stat(*flKey)
-			if errCert == nil && errKey == nil {
-				*flTls = true
-				cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
-				if err != nil {
-					log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
-				}
-				tlsConfig.Certificates = []tls.Certificate{cert}
+	if *flTls || *flTlsVerify {
+		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
+	} else {
+		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil)
+	}
+
+	if err := cli.Cmd(flag.Args()...); err != nil {
+		if sterr, ok := err.(*utils.StatusError); ok {
+			if sterr.Status != "" {
+				log.Println(sterr.Status)
 			}
+			os.Exit(sterr.StatusCode)
 		}
-
-		if *flTls || *flTlsVerify {
-			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
-		} else {
-			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil)
-		}
-
-		if err := cli.ParseCommands(flag.Args()...); err != nil {
-			if sterr, ok := err.(*utils.StatusError); ok {
-				if sterr.Status != "" {
-					log.Println(sterr.Status)
-				}
-				os.Exit(sterr.StatusCode)
-			}
-			log.Fatal(err)
-		}
+		log.Fatal(err)
 	}
 }
 
 func showVersion() {
 	fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
 }
-
-func checkKernelAndArch() error {
-	// Check for unsupported architectures
-	if runtime.GOARCH != "amd64" {
-		return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
-	}
-	// Check for unsupported kernel versions
-	// FIXME: it would be cleaner to not test for specific versions, but rather
-	// test for specific functionalities.
-	// Unfortunately we can't test for the feature "does not cause a kernel panic"
-	// without actually causing a kernel panic, so we need this workaround until
-	// the circumstances of pre-3.8 crashes are clearer.
-	// For details see http://github.com/dotcloud/docker/issues/407
-	if k, err := utils.GetKernelVersion(); err != nil {
-		log.Printf("WARNING: %s\n", err)
-	} else {
-		if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
-			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
-				log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
-			}
-		}
-	}
-	return nil
-}
diff --git a/docker/flags.go b/docker/flags.go
new file mode 100644
index 0000000..baae40e
--- /dev/null
+++ b/docker/flags.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+var (
+	dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
+)
+
+func init() {
+	if dockerCertPath == "" {
+		dockerCertPath = filepath.Join(os.Getenv("HOME"), ".docker")
+	}
+}
+
+var (
+	flVersion     = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit")
+	flDaemon      = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
+	flDebug       = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
+	flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
+	flEnableCors  = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
+	flTls         = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
+	flTlsVerify   = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
+
+	// these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs
+	flCa    *string
+	flCert  *string
+	flKey   *string
+	flHosts []string
+)
+
+func init() {
+	flCa = flag.String([]string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust only remotes providing a certificate signed by the CA given here")
+	flCert = flag.String([]string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file")
+	flKey = flag.String([]string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file")
+	opts.HostListVar(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
+}
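
The split between the package-level var block and the second init() above relies on Go's initialization order: all package-level variables are initialized before any init() runs, and multiple init() functions in one file run in source order. A minimal sketch of why the TLS path flags must be assigned inside init() (toy names, not the real flags):

package main

import "fmt"

var certPath = "" // imagine os.Getenv("DOCKER_CERT_PATH") returned ""

func init() {
	if certPath == "" {
		certPath = "/home/user/.docker" // resolved fallback
	}
}

// Declared as a var initializer, flCa would capture the empty certPath:
// var flCa = certPath + "/ca.pem"
var flCa string

func init() {
	flCa = certPath + "/ca.pem" // runs after the fallback is applied
}

func main() {
	fmt.Println(flCa) // /home/user/.docker/ca.pem
}
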
diff --git a/dockerinit/dockerinit.go b/dockerinit/dockerinit.go
index 1d06893..c5bba78 100644
--- a/dockerinit/dockerinit.go
+++ b/dockerinit/dockerinit.go
@@ -1,11 +1,12 @@
 package main
 
 import (
-	"github.com/dotcloud/docker/sysinit"
+	_ "github.com/docker/docker/daemon/execdriver/lxc"
+	_ "github.com/docker/docker/daemon/execdriver/native"
+	"github.com/docker/docker/reexec"
 )
 
 func main() {
 	// Running in init mode
-	sysinit.SysInit()
-	return
+	reexec.Init()
 }
diff --git a/docs/Dockerfile b/docs/Dockerfile
index 329646e..a50b396 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -1,10 +1,10 @@
 #
-# See the top level Makefile in https://github.com/dotcloud/docker for usage.
+# See the top level Makefile in https://github.com/docker/docker for usage.
 #
 FROM 		debian:jessie
 MAINTAINER	Sven Dowideit <SvenDowideit@docker.com> (@SvenDowideit)
 
-RUN 	apt-get update && apt-get install -yq make python-pip python-setuptools vim-tiny git gettext
+RUN 	apt-get update && apt-get install -y make python-pip python-setuptools vim-tiny git gettext
 
 RUN	pip install mkdocs
 
@@ -16,6 +16,9 @@
 # this version works, the current versions fail in different ways
 RUN	pip install awscli==1.3.9
 
+# make sure the git clone is not an old cache - we've published old versions a few times now
+ENV	CACHE_BUST Jul2014
+
 # get my sitemap.xml branch of mkdocs and use that for now
 RUN	git clone https://github.com/SvenDowideit/mkdocs	&&\
 	cd mkdocs/						&&\
@@ -27,15 +30,20 @@
 WORKDIR	/docs
 
 RUN	VERSION=$(cat /docs/VERSION)								&&\
+        MAJOR_MINOR="${VERSION%.*}"								&&\
+	for i in $(seq $MAJOR_MINOR -0.1 1.0) ; do echo "<li><a class='version' href='/v$i'>Version v$i</a></li>" ; done > /docs/sources/versions.html_fragment &&\
 	GIT_BRANCH=$(cat /docs/GIT_BRANCH)							&&\
 	GITCOMMIT=$(cat /docs/GITCOMMIT)							&&\
 	AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET)						&&\
+	BUILD_DATE=$(date)									&&\
 	sed -i "s/\$VERSION/$VERSION/g" /docs/theme/mkdocs/base.html				&&\
+	sed -i "s/\$MAJOR_MINOR/v$MAJOR_MINOR/g" /docs/theme/mkdocs/base.html			&&\
 	sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" /docs/theme/mkdocs/base.html			&&\
 	sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" /docs/theme/mkdocs/base.html			&&\
+	sed -i "s/\$BUILD_DATE/$BUILD_DATE/g" /docs/theme/mkdocs/base.html				&&\
 	sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" /docs/theme/mkdocs/base.html
 
-# note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
+# note, EXPOSE is only last because of https://github.com/docker/docker/issues/3525
 EXPOSE	8000
 
 CMD 	["mkdocs", "serve"]
diff --git a/docs/README.md b/docs/README.md
index 1729940..ba1feb5 100755
--- a/docs/README.md
+++ b/docs/README.md
@@ -5,7 +5,7 @@
 
 The HTML files are built and hosted on `https://docs.docker.com`, and update
 automatically after each change to the master or release branch of [Docker on
-GitHub](https://github.com/dotcloud/docker) thanks to post-commit hooks. The
+GitHub](https://github.com/docker/docker) thanks to post-commit hooks. The
 `docs` branch maps to the "latest" documentation and the `master` (unreleased
 development) branch maps to the "master" documentation.
 
diff --git a/docs/docs-update.py b/docs/docs-update.py
index 31bb47d..2ff305c 100755
--- a/docs/docs-update.py
+++ b/docs/docs-update.py
@@ -7,6 +7,7 @@
 #       ./docs/update.py /usr/bin/docker
 #
 
+import datetime
 import re
 from sys import argv
 import subprocess
@@ -15,6 +16,9 @@
 
 script, docker_cmd = argv
 
+# date "+%B %Y"
+date_string = datetime.date.today().strftime('%B %Y')
+
 def print_usage(outtext, docker_cmd, command):
     help = ""
     try:
@@ -204,9 +208,9 @@
         outtext.write("# HISTORY\n")
         if history != "":
            outtext.write(history+"\n")
-        recent_history_re = re.compile(".*June 2014.*", re.MULTILINE|re.DOTALL)
+        recent_history_re = re.compile(".*"+date_string+".*", re.MULTILINE|re.DOTALL)
         if not recent_history_re.match(history):
-            outtext.write("June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>\n")
+            outtext.write(date_string+", updated by Sven Dowideit <SvenDowideit@home.org.au>\n")
         outtext.close()
 
 # main
diff --git a/docs/man/Dockerfile b/docs/man/Dockerfile
index 438227d..9910bd4 100644
--- a/docs/man/Dockerfile
+++ b/docs/man/Dockerfile
@@ -1,5 +1,6 @@
-FROM fedora:20
-MAINTAINER ipbabble <emailwhenry@redhat.com>
-# Update and install pandoc
-RUN yum -y update; yum clean all;
-RUN yum -y install pandoc;
+FROM golang:1.3
+RUN mkdir -p /go/src/github.com/cpuguy83 \
+    && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
+    && cd /go/src/github.com/cpuguy83/go-md2man \
+    && go get -v ./...
+CMD ["/go/bin/go-md2man", "--help"]
diff --git a/docs/man/Dockerfile.5.md b/docs/man/Dockerfile.5.md
index b0a863f..9772d4e 100644
--- a/docs/man/Dockerfile.5.md
+++ b/docs/man/Dockerfile.5.md
@@ -96,7 +96,7 @@
   If you use the shell form of the CMD, the <command> executes in /bin/sh -c:
   **FROM ubuntu**
   **CMD echo "This is a test." | wc -**
-  If you run <command> wihtout a shell, then you must express the command as a
+  If you run <command> without a shell, then you must express the command as a
  JSON array and give the full path to the executable. This array form is the
   preferred form of CMD. All additional parameters must be individually expressed
   as strings in the array:
diff --git a/docs/man/README.md b/docs/man/README.md
index 45f1a91..a52e0cb 100644
--- a/docs/man/README.md
+++ b/docs/man/README.md
@@ -44,27 +44,26 @@
 
 # Generating man pages from the Markdown files
 
-The recommended approach for generating the man pages is via a  Docker 
-container. Using the supplied Dockerfile, Docker will create a Fedora based 
-container and isolate the Pandoc installation. This is a seamless process, 
-saving you from dealing with Pandoc and dependencies on your own computer.
+The recommended approach for generating the man pages is via a Docker
+container using the supplied `Dockerfile` to create an image with the correct
+environment. This uses `go-md2man`, a pure Go Markdown to man page generator.
 
-## Building the Fedora / Pandoc image
+## Building the md2man image
 
-There is a Dockerfile provided in the `docker/docs/man` directory.
+There is a `Dockerfile` provided in the `docker/docs/man` directory.
 
-Using this Dockerfile, create a Docker image tagged `fedora/pandoc`:
+Using this `Dockerfile`, create a Docker image tagged `docker/md2man`:
 
-    docker build  -t fedora/pandoc .
+    docker build -t docker/md2man .
 
-## Utilizing the Fedora / Pandoc image
+## Utilizing the image
 
 Once the image is built, run a container using the image with *volumes*:
 
-    docker run -v /<path-to-git-dir>/docker/docs/man:/pandoc:rw \
-    -w /pandoc -i fedora/pandoc /pandoc/md2man-all.sh
+    docker run -v /<path-to-git-dir>/docker/docs/man:/docs:rw \
+    -w /docs -i docker/md2man /docs/md2man-all.sh
 
-The Pandoc Docker container will process the Markdown files and generate
+The `md2man` Docker container will process the Markdown files and generate
 the man pages inside the `docker/docs/man/man1` directory using
 Docker volumes. For more information on Docker volumes see the man page for
 `docker run` and also look at the article [Sharing Directories via Volumes]
diff --git a/docs/man/docker-attach.1.md b/docs/man/docker-attach.1.md
index 1b4e68b..7deda6c 100644
--- a/docs/man/docker-attach.1.md
+++ b/docs/man/docker-attach.1.md
@@ -14,7 +14,7 @@
 If you **docker run** a container in detached mode (**-d**), you can reattach to
 the detached container with **docker attach** using the container's ID or name.
 
-You can detach from the container again (and leave it running) with `CTRL-q 
+You can detach from the container again (and leave it running) with `CTRL-p 
 CTRL-q` (for a quiet exit), or `CTRL-c`  which will send a SIGKILL to the
 container, or `CTRL-\` to get a stacktrace of the Docker client when it quits.
 When you detach from a container the exit code will be returned to
@@ -25,7 +25,7 @@
    Do not attach STDIN. The default is *false*.
 
 **--sig-proxy**=*true*|*false*
-   Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied. The default is *true*.
+   Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*.
 
 # EXAMPLES
 
diff --git a/docs/man/docker-commit.1.md b/docs/man/docker-commit.1.md
index bbd1db2..31edcc0 100644
--- a/docs/man/docker-commit.1.md
+++ b/docs/man/docker-commit.1.md
@@ -8,6 +8,7 @@
 **docker commit**
 [**-a**|**--author**[=*AUTHOR*]]
 [**-m**|**--message**[=*MESSAGE*]]
+[**-p**|**--pause**[=*true*]]
  CONTAINER [REPOSITORY[:TAG]]
 
 # DESCRIPTION
@@ -20,8 +21,8 @@
 **-m**, **--message**=""
    Commit message
 
-**-p, --pause**=true
-   Pause container during commit
+**-p**, **--pause**=*true*|*false*
+   Pause container during commit. The default is *true*.
 
 # EXAMPLES
 
@@ -30,10 +31,11 @@
 in interactive mode with the bash shell. Apache is also running. To
 create a new image run docker ps to find the container's ID and then run:
 
-    # docker commit -m= "Added Apache to Fedora base image" \
+    # docker commit -m="Added Apache to Fedora base image" \
       -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20
 
 # HISTORY
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and in
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+July 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-info.1.md b/docs/man/docker-info.1.md
index 2945d61..bf64a7b 100644
--- a/docs/man/docker-info.1.md
+++ b/docs/man/docker-info.1.md
@@ -29,18 +29,14 @@
 Here is a sample output:
 
     # docker info
-    Containers: 18
-    Images: 95
-    Storage Driver: devicemapper
-     Pool Name: docker-8:1-170408448-pool
-     Data file: /var/lib/docker/devicemapper/devicemapper/data
-     Metadata file: /var/lib/docker/devicemapper/devicemapper/metadata
-     Data Space Used: 9946.3 Mb
-     Data Space Total: 102400.0 Mb
-     Metadata Space Used: 9.9 Mb
-     Metadata Space Total: 2048.0 Mb
-    Execution Driver: native-0.1
-    Kernel Version: 3.10.0-116.el7.x86_64
+    Containers: 14
+    Images: 52
+    Storage Driver: aufs
+     Root Dir: /var/lib/docker/aufs
+     Dirs: 80
+    Execution Driver: native-0.2
+    Kernel Version: 3.13.0-24-generic
+    Operating System: Ubuntu 14.04 LTS
 
 # HISTORY
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
diff --git a/docs/man/docker-logout.1.md b/docs/man/docker-logout.1.md
new file mode 100644
index 0000000..07dcdcb
--- /dev/null
+++ b/docs/man/docker-logout.1.md
@@ -0,0 +1,27 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-logout - Log out from a Docker registry. If no server is specified, "https://index.docker.io/v1/" is the default.
+
+# SYNOPSIS
+**docker logout**
+[SERVER]
+
+# DESCRIPTION
+Log the user out of a Docker registry. If no server is
+specified, "https://index.docker.io/v1/" is the default. If you want to
+log out from a private registry, you can do so by specifying the server name.
+
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
+
+## Log out from a local registry
+
+    # docker logout localhost:8080
+
+# HISTORY
+June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io)
+July 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-logs.1.md b/docs/man/docker-logs.1.md
index 5c3df75..1fbd229 100644
--- a/docs/man/docker-logs.1.md
+++ b/docs/man/docker-logs.1.md
@@ -8,6 +8,7 @@
 **docker logs**
 [**-f**|**--follow**[=*false*]]
 [**-t**|**--timestamps**[=*false*]]
+[**--tail**[=*"all"*]]
 CONTAINER
 
 # DESCRIPTION
@@ -27,7 +28,11 @@
 **-t**, **--timestamps**=*true*|*false*
    Show timestamps. The default is *false*.
 
+**--tail**="all"
+   Output the specified number of lines at the end of logs (defaults to all logs)
+
 # HISTORY
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and internal work.
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+July 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-pause.1.md b/docs/man/docker-pause.1.md
index e6c0c24..7b4b091 100644
--- a/docs/man/docker-pause.1.md
+++ b/docs/man/docker-pause.1.md
@@ -8,6 +8,18 @@
 **docker pause**
 CONTAINER
 
+# DESCRIPTION
+
+The `docker pause` command uses the cgroups freezer to suspend all processes in
+a container. Traditionally, when suspending a process, the `SIGSTOP` signal is
+used, which is observable by the process being suspended. With the cgroups freezer,
+the process is unaware that it is being suspended (and subsequently resumed) and
+is unable to capture the event.
+
+See the [cgroups freezer documentation]
+(https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for
+further details.
+
 # OPTIONS
 There are no available options.
 
diff --git a/docs/man/docker-ps.1.md b/docs/man/docker-ps.1.md
index 9264d53..bf22d87 100644
--- a/docs/man/docker-ps.1.md
+++ b/docs/man/docker-ps.1.md
@@ -8,6 +8,7 @@
 **docker ps**
 [**-a**|**--all**[=*false*]]
 [**--before**[=*BEFORE*]]
+[**-f**|**--filter**[=*[]*]]
 [**-l**|**--latest**[=*false*]]
 [**-n**[=*-1*]]
 [**--no-trunc**[=*false*]]
@@ -28,6 +29,10 @@
 **--before**=""
    Show only container created before Id or Name, include non-running ones.
 
+**-f**, **--filter**=[]
+   Provide filter values. Valid filters:
+                          exited=<int> - containers with exit code of <int>
+
 **-l**, **--latest**=*true*|*false*
    Show only the latest created container, include non-running ones. The default is *false*.
 
@@ -68,3 +73,4 @@
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and internal work.
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+August 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-rm.1.md b/docs/man/docker-rm.1.md
index 1b45376..bae6a7e 100644
--- a/docs/man/docker-rm.1.md
+++ b/docs/man/docker-rm.1.md
@@ -20,7 +20,7 @@
 
 # OPTIONS
 **-f**, **--force**=*true*|*false*
-   Force removal of running container. The default is *false*.
+   Force the removal of a running container (uses SIGKILL). The default is *false*.
 
 **-l**, **--link**=*true*|*false*
    Remove the specified link and not the underlying container. The default is *false*.
@@ -49,3 +49,5 @@
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and internal work.
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+July 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+August 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md
index e7571ac..225fb78 100644
--- a/docs/man/docker-run.1.md
+++ b/docs/man/docker-run.1.md
@@ -8,9 +8,12 @@
 **docker run**
 [**-a**|**--attach**[=*[]*]]
 [**-c**|**--cpu-shares**[=*0*]]
+[**--cap-add**[=*[]*]]
+[**--cap-drop**[=*[]*]]
 [**--cidfile**[=*CIDFILE*]]
 [**--cpuset**[=*CPUSET*]]
 [**-d**|**--detach**[=*false*]]
+[**--device**[=*[]*]]
 [**--dns-search**[=*[]*]]
 [**--dns**[=*[]*]]
 [**-e**|**--env**[=*[]*]]
@@ -27,6 +30,7 @@
 [**-P**|**--publish-all**[=*false*]]
 [**-p**|**--publish**[=*[]*]]
 [**--privileged**[=*false*]]
+[**--restart**[=*POLICY*]]
 [**--rm**[=*false*]]
 [**--sig-proxy**[=*true*]]
 [**-t**|**--tty**[=*false*]]
@@ -67,13 +71,19 @@
 shares of CPU time to one or more containers when you start them via **docker
 run**.
 
-**--cidfile**=*file*
-   Write the container ID to the file specified.
+**--cap-add**=[]
+   Add Linux capabilities
+
+**--cap-drop**=[]
+   Drop Linux capabilities
+
+**--cidfile**=""
+   Write the container ID to the file
 
 **--cpuset**=""
    CPUs in which to allow execution (0-3, 0,1)
 
-**-d**, **-detach**=*true*|*false*
+**-d**, **--detach**=*true*|*false*
    Detached mode. This runs the container in the background. It outputs the new
 container's ID and any error messages. At any time you can run **docker ps** in
 the other shell to view a list of the running containers. You can reattach to a
@@ -82,18 +92,19 @@
 
    When attached in the tty mode, you can detach from a running container without
 stopping the process by pressing the keys CTRL-P CTRL-Q.
+**--device**=[]
+   Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)
 
 **--dns-search**=[]
-   Set custom dns search domains
+   Set custom DNS search domains
 
 **--dns**=*IP-address*
    Set custom DNS servers. This option can be used to override the DNS
 configuration passed to the container. Typically this is necessary when the
 host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this
-is the case the **-dns** flags is necessary for every run.
+is the case, the **--dns** flag is necessary for every run.
 
-
-**-e**, **-env**=*environment*
+**-e**, **--env**=*environment*
    Set environment variables. This option allows you to specify arbitrary
 environment variables that are available for the process that will be launched
 inside of the container.
@@ -110,8 +121,9 @@
 something else inside the container, so you can override the default ENTRYPOINT
 at runtime by using a **--entrypoint** and a string to specify the new
 ENTRYPOINT.
+
 **--env-file**=[]
-   Read in a line delimited file of ENV variables
+   Read in a line-delimited file of environment variables
 
 **--expose**=*port*
    Expose a port from the container without publishing it to your host. A
@@ -120,10 +132,10 @@
 the operator can use the **--expose** option with **docker run**, or 3) the
 container can be started with the **--link**.
 
-**-h**, **-hostname**=*hostname*
+**-h**, **--hostname**=*hostname*
    Sets the container host name that is available inside the container.
 
-**-i**, **-interactive**=*true*|*false*
+**-i**, **--interactive**=*true*|*false*
    When set to true, keep stdin open even if not attached. The default is false.
 
 **--link**=*name*:*alias*
@@ -136,7 +148,7 @@
 **--lxc-conf**=[]
    (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
 
-**-m**, **-memory**=*memory-limit*
+**-m**, **--memory**=*memory-limit*
    Allows you to constrain the memory available to a container. If the host
 supports swap memory, then the -m memory setting can be larger than physical
 RAM. If a limit of 0 is specified, the container's memory is not limited. The
@@ -165,14 +177,14 @@
                                'container:<name|id>': reuses another container network stack
                                'host': use the host network stack inside the container.  Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
 
-**-P**, **-publish-all**=*true*|*false*
+**-P**, **--publish-all**=*true*|*false*
    When set to true publish all exposed ports to the host interfaces. The
 default is false. If the operator uses -P (or -p) then Docker will make the
 exposed port accessible on the host and the ports will be available to any
 client that can reach the host. To find the map between the host ports and the
 exposed ports, use **docker port**.
 
-**-p**, **-publish**=[]
+**-p**, **--publish**=[]
    Publish a container's port to the host (format: ip:hostPort:containerPort |
 ip::containerPort | hostPort:containerPort) (use **docker port** to see the
 actual mapping)
@@ -190,26 +202,21 @@
 
 
 **--rm**=*true*|*false*
-   If set to *true* the container is automatically removed when it exits. The
-default is *false*. This option is incompatible with **-d**.
-
+   Automatically remove the container when it exits (incompatible with -d). The default is *false*.
 
 **--sig-proxy**=*true*|*false*
-   When set to true, proxify received signals to the process (even in
-non-tty mode). SIGCHLD is not proxied. The default is *true*.
+   Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*.
 
-
-**-t**, **-tty**=*true*|*false*
+**-t**, **--tty**=*true*|*false*
    When set to true, Docker can allocate a pseudo-tty and attach to the standard
 input of any container. This can be used, for example, to run a throwaway
 interactive shell. The default value is false.
 
-
-**-u**, **-user**=*username*,*uid*
-   Set a username or UID for the container.
+**-u**, **--user**=""
+   Username or UID
 
 
-**-v**, **-volume**=*volume*[:ro|:rw]
+**-v**, **--volume**=*volume*[:ro|:rw]
    Bind mount a volume to the container. 
 
 The **-v** option can be used one or
@@ -233,7 +240,7 @@
 the reference container.
 
 
-**-w**, **-workdir**=*directory*
+**-w**, **--workdir**=*directory*
    Working directory inside the container. The default working directory for
 running binaries within a container is the root directory (/). The developer can
 set a different default with the Dockerfile WORKDIR instruction. The operator
@@ -241,7 +248,10 @@
 
 
 **IMAGE**
-   The image name or ID.
+   The image name or ID. You can specify the version of an image you'd like
+   to run the container with by adding `image:tag` to the command. For example,
+   `docker run ubuntu:14.04`.
+
 
 
 **COMMAND**
@@ -338,7 +348,7 @@
 
 Multiple --volumes-from parameters will bring together multiple data volumes from
 multiple containers. And it's possible to mount the volumes that came from the
-DATA container in yet another container via the fedora-container1 intermidiery
+DATA container in yet another container via the fedora-container1 intermediary
 container, allowing to abstract the actual data source from users of that data:
 
     # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
@@ -371,3 +381,4 @@
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and internal work.
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+July 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-tag.1.md b/docs/man/docker-tag.1.md
index 041c9e1..a42ebe7 100644
--- a/docs/man/docker-tag.1.md
+++ b/docs/man/docker-tag.1.md
@@ -7,7 +7,7 @@
 # SYNOPSIS
 **docker tag**
 [**-f**|**--force**[=*false*]]
- IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+ IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]
 
 # DESCRIPTION
 This will give a new alias to an image in the repository. This refers to the
@@ -29,7 +29,7 @@
 
 **TAG**
    The tag you are assigning to the image.  Though this is arbitrary it is
-recommended to be used for a version to disinguish images with the same name.
+recommended to be used for a version to distinguish images with the same name.
 Note that here TAG is a part of the overall name or "tag".
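+
+For example (the image ID here is hypothetical):
+
+    # docker tag 0e5574283393 fedora/httpd:version1.0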
 
 # OPTIONS
@@ -56,3 +56,4 @@
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and internal work.
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+July 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-unpause.1.md b/docs/man/docker-unpause.1.md
index 8949548..dfce163 100644
--- a/docs/man/docker-unpause.1.md
+++ b/docs/man/docker-unpause.1.md
@@ -8,6 +8,15 @@
 **docker unpause**
 CONTAINER
 
+# DESCRIPTION
+
+The `docker unpause` command uses the cgroups freezer to un-suspend all
+processes in a container.
+
+See the [cgroups freezer documentation]
+(https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for
+further details.
+
 # OPTIONS
 There are no available options.
 
diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md
index a7a826e..3932097 100644
--- a/docs/man/docker.1.md
+++ b/docs/man/docker.1.md
@@ -64,9 +64,6 @@
 **-p**=""
   Path to use for daemon PID file. Default is `/var/run/docker.pid`
 
-**-r**=*true*|*false*
-  Restart previously running containers. Default is true.
-
 **-s**=""
   Force the Docker runtime to use a specific storage driver.
 
@@ -74,7 +71,7 @@
   Print version information and quit. Default is false.
 
 **--selinux-enabled**=*true*|*false*
-  Enable selinux support. Default is false.
+  Enable SELinux support. Default is false. SELinux does not presently support the BTRFS storage driver.
 
 # COMMANDS
 **docker-attach(1)**
@@ -124,6 +121,9 @@
 **docker-login(1)**
   Register or Login to a Docker registry server
 
+**docker-logout(1)**
+  Log the user out of a Docker registry server
+
 **docker-logs(1)**
   Fetch the logs of a container
 
diff --git a/docs/man/md2man-all.sh b/docs/man/md2man-all.sh
index 12d84de..97c65c9 100755
--- a/docs/man/md2man-all.sh
+++ b/docs/man/md2man-all.sh
@@ -18,5 +18,5 @@
 		continue
 	fi
 	mkdir -p "./man${num}"
-	pandoc -s -t man "$FILE" -o "./man${num}/${name}"
+	go-md2man -in "$FILE" -out "./man${num}/${name}"
 done
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index f4ebcb6..c45b717 100755
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -6,7 +6,7 @@
 
 dev_addr: '0.0.0.0:8000'
 
-repo_url: https://github.com/dotcloud/docker/
+repo_url: https://github.com/docker/docker/
 
 docs_dir: sources
 
@@ -83,6 +83,7 @@
 - ['articles/security.md', 'Articles', 'Security']
 - ['articles/https.md', 'Articles', 'Running Docker with HTTPS']
 - ['articles/host_integration.md', 'Articles', 'Automatically starting Containers']
+- ['articles/certificates.md', 'Articles', 'Using certificates for repository client verification']
 - ['articles/using_supervisord.md', 'Articles', 'Using Supervisor']
 - ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine']
 - ['articles/puppet.md', 'Articles', 'Using Puppet']
@@ -104,6 +105,7 @@
 - ['reference/api/registry_api.md', 'Reference', 'Docker Registry API']
 - ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec']
 - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API']
+- ['reference/api/docker_remote_api_v1.14.md', 'Reference', 'Docker Remote API v1.14']
 - ['reference/api/docker_remote_api_v1.13.md', 'Reference', 'Docker Remote API v1.13']
 - ['reference/api/docker_remote_api_v1.12.md', 'Reference', 'Docker Remote API v1.12']
 - ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.11']
@@ -119,7 +121,6 @@
 - ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**']
 - ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**']
 - ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries']
-- ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker Hub OAuth API']
 - ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API']
 
 - ['jsearch.md', '**HIDDEN**']
diff --git a/docs/release.sh b/docs/release.sh
index f6dc2ec..ba309aa 100755
--- a/docs/release.sh
+++ b/docs/release.sh
@@ -27,6 +27,10 @@
 	fi
 fi
 
+# Strip the patch version - 1.0.2-dev -> v1.0
+MAJOR_MINOR="v${VERSION%.*}"
+export MAJOR_MINOR
+
 export BUCKET=$AWS_S3_BUCKET
 
 export AWS_CONFIG_FILE=$(pwd)/awsconfig
@@ -69,7 +73,8 @@
 
 	# a really complicated way to send only the files we want
 	# if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go
-	endings=( json html xml css js gif png JPG ttf svg woff)
+	#  versions.html_fragment
+	endings=( json html xml css js gif png JPG ttf svg woff html_fragment )
 	for i in ${endings[@]}; do
 		include=""
 		for j in ${endings[@]}; do
@@ -101,13 +106,16 @@
 }
 
 setup_s3
-build_current_documentation
-upload_current_documentation
 
-# Remove the last version - 1.0.2-dev -> 1.0
-MAJOR_MINOR="v${VERSION%.*}"
+# Default to only building the version-specific docs so we don't clobber the latest by accident with old versions
+if [ "$BUILD_ROOT" == "yes" ]; then
+	echo "Building root documentation"
+	build_current_documentation
+	upload_current_documentation
+fi
 
 #build again with /v1.0/ prefix
 sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml
+echo "Building the /$MAJOR_MINOR/ documentation"
 build_current_documentation
 upload_current_documentation "/$MAJOR_MINOR/"
diff --git a/docs/sources/articles/baseimages.md b/docs/sources/articles/baseimages.md
index c795b7a..bc677eb 100644
--- a/docs/sources/articles/baseimages.md
+++ b/docs/sources/articles/baseimages.md
@@ -33,13 +33,13 @@
 There are more example scripts for creating base images in the Docker
 GitHub Repo:
 
- - [BusyBox](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh)
+ - [BusyBox](https://github.com/docker/docker/blob/master/contrib/mkimage-busybox.sh)
  - CentOS / Scientific Linux CERN (SLC) [on Debian/Ubuntu](
-   https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh) or
+   https://github.com/docker/docker/blob/master/contrib/mkimage-rinse.sh) or
    [on CentOS/RHEL/SLC/etc.](
-   https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh)
+   https://github.com/docker/docker/blob/master/contrib/mkimage-yum.sh)
  - [Debian / Ubuntu](
-   https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh)
+   https://github.com/docker/docker/blob/master/contrib/mkimage-debootstrap.sh)
 
 ## Creating a simple base image using `scratch`
 
@@ -52,7 +52,7 @@
 image to base your new minimal containers `FROM`:
 
     FROM scratch
-    ADD true-asm /true
+    COPY true-asm /true
     CMD ["/true"]
 
 The Dockerfile above is from an extremely minimal image - [tianon/true](
diff --git a/docs/sources/articles/certificates.md b/docs/sources/articles/certificates.md
new file mode 100644
index 0000000..90d3f1b
--- /dev/null
+++ b/docs/sources/articles/certificates.md
@@ -0,0 +1,114 @@
+page_title: Using certificates for repository client verification
+page_description: How to set up and use certificates with a registry to verify access
+page_keywords: Usage, registry, repository, client, root, certificate, docker, apache, ssl, tls, documentation, examples, articles, tutorials
+
+# Using certificates for repository client verification
+
+In [Running Docker with HTTPS](/articles/https), you learned that, by default,
+Docker runs via a non-networked Unix socket and TLS must be enabled in order
+to have the Docker client and the daemon communicate securely over HTTPS.
+
+Now, you will see how to allow the Docker registry (i.e., *a server*) to
+verify that the Docker daemon (i.e., *a client*) has the right to access the
+images being hosted with *certificate-based client-server authentication*.
+
+We will show you how to install a Certificate Authority (CA) root certificate
+for the registry and how to set the client TLS certificate for verification.
+
+## Understanding the configuration
+
+A custom certificate is configured by creating a directory under
+`/etc/docker/certs.d` using the same name as the registry's hostname (e.g.,
+`localhost`). All `*.crt` files are added to this directory as CA roots.
+
+> **Note:**
+> In the absence of any root certificate authorities, Docker
+> will use the system default (i.e., host's root CA set).
+
+The presence of one or more `<filename>.key/cert` pairs indicates to Docker
+that there are custom certificates required for access to the desired
+repository.
+
+> **Note:**
+> If there are multiple certificates, each will be tried in alphabetical
+> order. If there is an authentication error (e.g., 403, 5xx, etc.), Docker
+> will continue to try with the next certificate.
+
+Our example is set up like this:
+
+    /etc/docker/certs.d/        <-- Certificate directory
+    └── localhost               <-- Hostname
+       ├── client.cert          <-- Client certificate
+       ├── client.key           <-- Client key
+       └── localhost.crt        <-- Registry certificate
+
+## Creating the client certificates
+
+You will use OpenSSL's `genrsa` and `req` commands to first generate an RSA
+key and then use the key to create the certificate request.   
+
+    $ openssl genrsa -out client.key 1024
+    $ openssl req -new -x509 -text -key client.key -out client.cert
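+
+To place these where Docker will look for them, you can copy them into the
+certificate directory described above (a sketch, assuming the registry
+hostname is `localhost`):
+
+    $ sudo mkdir -p /etc/docker/certs.d/localhost
+    $ sudo cp client.cert client.key /etc/docker/certs.d/localhost/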
+
+> **Warning:**
+> Using TLS and managing a CA is an advanced topic.
+> You should be familiar with OpenSSL, x509, and TLS before
+> attempting to use them in production. 
+
+> **Warning:**
+> These TLS commands will only generate a working set of certificates on Linux.
+> The version of OpenSSL in Mac OS X is incompatible with the type of
+> certificate Docker requires.
+
+## Testing the verification setup
+
+You can test this setup by using Apache to host a Docker registry.
+For this purpose, you can copy a registry tree (containing images) inside
+the Apache root.
+
+> **Note:**
+> You can find such an example [here](
+> http://people.gnome.org/~alexl/v1.tar.gz) - which contains the busybox image.
+
+Once you set up the registry, you can use the following Apache configuration
+to implement certificate-based protection.
+
+    # This must be in the root context, otherwise it causes a re-negotiation
+    # which is not supported by the TLS implementation in go
+    SSLVerifyClient optional_no_ca
+
+    <Location /v1>
+    Action cert-protected /cgi-bin/cert.cgi
+    SetHandler cert-protected
+
+    Header set x-docker-registry-version "0.6.2"
+    SetEnvIf Host (.*) custom_host=$1
+    Header set X-Docker-Endpoints "%{custom_host}e"
+    </Location>
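+
+> **Note:**
+> This configuration assumes that the `mod_ssl`, `mod_actions`, `mod_headers`,
+> and `mod_setenvif` Apache modules are enabled.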
+
+Save the above content as `/etc/httpd/conf.d/registry.conf`, and
+continue with creating a `cert.cgi` file under `/var/www/cgi-bin/`.
+
+    #!/bin/bash
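+    # Reject requests that did not arrive over SSL, or whose client
+    # certificate failed verification; otherwise serve the requested file.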
+    if [ "$HTTPS" != "on" ]; then
+        echo "Status: 403 Not using SSL"
+        echo "x-docker-registry-version: 0.6.2"
+        echo
+        exit 0
+    fi
+    if [ "$SSL_CLIENT_VERIFY" == "NONE" ]; then
+        echo "Status: 403 Client certificate invalid"
+        echo "x-docker-registry-version: 0.6.2"
+        echo
+        exit 0
+    fi
+    echo "Content-length: $(stat --printf='%s' $PATH_TRANSLATED)"
+    echo "x-docker-registry-version: 0.6.2"
+    echo "X-Docker-Endpoints: $SERVER_NAME"
+    echo "X-Docker-Size: 0"
+    echo
+
+    cat $PATH_TRANSLATED
+
+This CGI script will ensure that all requests to `/v1` *without* a valid
+certificate are answered with a `403` (i.e., HTTP Forbidden) error.
diff --git a/docs/sources/articles/cfengine_process_management.md b/docs/sources/articles/cfengine_process_management.md
index 6bb4df6..a9441a6 100644
--- a/docs/sources/articles/cfengine_process_management.md
+++ b/docs/sources/articles/cfengine_process_management.md
@@ -65,13 +65,12 @@
     FROM ubuntu
     MAINTAINER Eystein Måløy Stenberg <eytein.stenberg@gmail.com>
 
-    RUN apt-get -y install wget lsb-release unzip ca-certificates
+    RUN apt-get update && apt-get install -y wget lsb-release unzip ca-certificates
 
     # install latest CFEngine
     RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add -
     RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list
-    RUN apt-get update
-    RUN apt-get install cfengine-community
+    RUN apt-get update && apt-get install -y cfengine-community
 
     # install cfe-docker process management policy
     RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/
@@ -80,7 +79,7 @@
     RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip
 
     # apache2 and openssh are just for testing purposes, install your own apps here
-    RUN apt-get -y install openssh-server apache2
+    RUN apt-get update && apt-get install -y openssh-server apache2
     RUN mkdir -p /var/run/sshd
     RUN echo "root:password" | chpasswd  # need a password for ssh
 
diff --git a/docs/sources/articles/dsc.md b/docs/sources/articles/dsc.md
index 94f5e9d..5e05c40 100644
--- a/docs/sources/articles/dsc.md
+++ b/docs/sources/articles/dsc.md
@@ -8,7 +8,7 @@
 management tool that extends the existing functionality of Windows PowerShell.
 DSC uses a declarative syntax to define the state in which a target should be
 configured. More information about PowerShell DSC can be found at
-http://technet.microsoft.com/en-us/library/dn249912.aspx.
+[http://technet.microsoft.com/en-us/library/dn249912.aspx](http://technet.microsoft.com/en-us/library/dn249912.aspx).
 
 ## Requirements
 
@@ -17,14 +17,14 @@
 The included DSC configuration script also uses the official PPA so
 only an Ubuntu target is supported. The Ubuntu target must already have the
 required OMI Server and PowerShell DSC for Linux providers installed. More
-information can be found at https://github.com/MSFTOSSMgmt/WPSDSCLinux. The
-source repository listed below also includes PowerShell DSC for Linux
+information can be found at [https://github.com/MSFTOSSMgmt/WPSDSCLinux](https://github.com/MSFTOSSMgmt/WPSDSCLinux).
+The source repository listed below also includes PowerShell DSC for Linux
 installation and init scripts along with more detailed installation information.
 
 ## Installation
 
 The DSC configuration example source is available in the following repository:
-https://github.com/anweiss/DockerClientDSC. It can be cloned with:
+[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC). It can be cloned with:
 
     $ git clone https://github.com/anweiss/DockerClientDSC.git
 
@@ -37,15 +37,18 @@
 `Set-DscConfiguration` cmdlet.
 
 More detailed usage information can be found at
-https://github.com/anweiss/DockerClientDSC.
+[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC).
 
-### Run Configuration
+### Install Docker
 The Docker installation configuration is equivalent to running:
 
 ```
-apt-get install docker.io
-ln -sf /usr/bin/docker.io /usr/local/bin/docker
-sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io
+apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys \
+36A1D7869245C8950F966E92D8576A8BA88D21E9
+sh -c "echo deb https://get.docker.io/ubuntu docker main \
+> /etc/apt/sources.list.d/docker.list"
+apt-get update
+apt-get install lxc-docker
 ```
 
 Ensure that your current working directory is set to the `DockerClientDSC`
@@ -83,35 +86,82 @@
 ```
 
 ### Images
-Image configuration is equivalent to running: `docker pull [image]`.
+Image configuration is equivalent to running: `docker pull [image]` or
+`docker rmi -f [IMAGE]`.
 
-Using the same Run Configuration steps defined above, execute `DockerClient`
-with the `Image` parameter:
+Using the same steps defined above, execute `DockerClient` with the `Image`
+parameter and apply the configuration:
 
 ```powershell
-DockerClient -Hostname "myhost" -Image node
+DockerClient -Hostname "myhost" -Image "node"
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
 ```
 
-The configuration process can be initiated as before:
+You can also configure the host to pull multiple images:
 
 ```powershell
+DockerClient -Hostname "myhost" -Image "node","mongo"
 .\RunDockerClientConfig.ps1 -Hostname "myhost"
 ```
 
+To remove images, use a hashtable as follows:
+
+```powershell
+DockerClient -Hostname "myhost" -Image @{Name="node"; Remove=$true}
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
+```
+
 ### Containers
 Container configuration is equivalent to running:
-`docker run -d --name="[containername]" [image] '[command]'`.
 
-Using the same Run Configuration steps defined above, execute `DockerClient`
-with the `Image`, `ContainerName`, and `Command` parameters:
+```
+docker run -d --name="[containername]" -p '[port]' -e '[env]' --link '[link]'\
+'[image]' '[command]'
+```
+or
 
-```powershell
-DockerClient -Hostname "myhost" -Image node -ContainerName "helloworld" `
--Command 'echo "Hello World!"'
+```
+docker rm -f [containername]
 ```
 
-The configuration process can be initiated as before:
+To create or remove containers, you can use the `Container` parameter with one
+or more hashtables. The hashtable(s) passed to this parameter can have the
+following properties:
+
+- Name (required)
+- Image (required unless Remove property is set to `$true`)
+- Port
+- Env
+- Link
+- Command
+- Remove
+
+For example, create a hashtable with the settings for your container:
 
 ```powershell
+$webContainer = @{Name="web"; Image="anweiss/docker-platynem"; Port="80:80"}
+```
+
+Then, using the same steps defined above, execute
+`DockerClient` with the `-Image` and `-Container` parameters:
+
+```powershell
+DockerClient -Hostname "myhost" -Image node -Container $webContainer
 .\RunDockerClientConfig.ps1 -Hostname "myhost"
 ```
+
+Existing containers can also be removed as follows:
+
+```powershell
+$containerToRemove = @{Name="web"; Remove=$true}
+DockerClient -Hostname "myhost" -Container $containerToRemove
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
+```
+
+Here is a hashtable with all of the properties that can be used to create a
+container:
+
+```powershell
+$containerProps = @{Name="web"; Image="node:latest"; Port="80:80"; `
+Env="PORT=80"; Link="db:db"; Command="grunt"}
+```
\ No newline at end of file
diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md
index b6ae4ef..739b724 100644
--- a/docs/sources/articles/https.md
+++ b/docs/sources/articles/https.md
@@ -1,23 +1,28 @@
-page_title: Docker HTTPS Setup
-page_description: How to setup docker with https
-page_keywords: docker, example, https, daemon
+page_title: Running Docker with HTTPS
+page_description: How to setup and run Docker with HTTPS
+page_keywords: docker, docs, article, example, https, daemon, tls, ca, certificate
 
 # Running Docker with https
 
 By default, Docker runs via a non-networked Unix socket. It can also
 optionally communicate using a HTTP socket.
 
-If you need Docker reachable via the network in a safe manner, you can
-enable TLS by specifying the tlsverify flag and pointing Docker's
-tlscacert flag to a trusted CA certificate.
+If you need Docker to be reachable via the network in a safe manner, you can
+enable TLS by specifying the `tlsverify` flag and pointing Docker's
+`tlscacert` flag to a trusted CA certificate.
 
-In daemon mode, it will only allow connections from clients
-authenticated by a certificate signed by that CA. In client mode, it
-will only connect to servers with a certificate signed by that CA.
+In the daemon mode, it will only allow connections from clients
+authenticated by a certificate signed by that CA. In the client mode,
+it will only connect to servers with a certificate signed by that CA.
 
 > **Warning**: 
-> Using TLS and managing a CA is an advanced topic. Please make you self
-> familiar with openssl, x509 and tls before using it in production.
+> Using TLS and managing a CA is an advanced topic. Please familiarize yourself
+> with OpenSSL, x509 and TLS before using it in production.
+
+> **Warning**:
+> These TLS commands will only generate a working set of certificates on Linux.
+> Mac OS X comes with a version of OpenSSL that is incompatible with the 
+> certificates that Docker requires.
 
 ## Create a CA, server and client keys with OpenSSL
 
@@ -25,29 +30,67 @@
 keys:
 
     $ echo 01 > ca.srl
-    $ openssl genrsa -des3 -out ca-key.pem
+    $ openssl genrsa -des3 -out ca-key.pem 2048
+    Generating RSA private key, 2048 bit long modulus
+    ......+++
+    ...............+++
+    e is 65537 (0x10001)
+    Enter pass phrase for ca-key.pem:
+    Verifying - Enter pass phrase for ca-key.pem:
     $ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem
+    Enter pass phrase for ca-key.pem:
+     You are about to be asked to enter information that will be incorporated
+     into your certificate request.
+     What you are about to enter is what is called a Distinguished Name or a DN.
+     There are quite a few fields but you can leave some blank
+     For some fields there will be a default value,
+     If you enter '.', the field will be left blank.
+     -----
+     Country Name (2 letter code) [AU]:
+     State or Province Name (full name) [Some-State]:Queensland
+     Locality Name (eg, city) []:Brisbane
+     Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc
+     Organizational Unit Name (eg, section) []:Boot2Docker
+     Common Name (e.g. server FQDN or YOUR name) []:your.host.com
+     Email Address []:Sven@home.org.au
 
 Now that we have a CA, you can create a server key and certificate
-signing request. Make sure that "Common Name (e.g., server FQDN or YOUR
-name)" matches the hostname you will use to connect to Docker or just
-use `\*` for a certificate valid for any hostname:
+signing request (CSR). Make sure that "Common Name" (i.e. server FQDN or YOUR
+name) matches the hostname you will use to connect to Docker:
 
-    $ openssl genrsa -des3 -out server-key.pem
-    $ openssl req -new -key server-key.pem -out server.csr
+    $ openssl genrsa -des3 -out server-key.pem 2048
+    Generating RSA private key, 2048 bit long modulus
+    ......................................................+++
+    ............................................+++
+    e is 65537 (0x10001)
+    Enter pass phrase for server-key.pem:
+    Verifying - Enter pass phrase for server-key.pem:
+    $ openssl req -subj '/CN=<Your Hostname Here>' -new -key server-key.pem -out server.csr
+    Enter pass phrase for server-key.pem:
 
-Next we're going to sign the key with our CA:
+Next, we're going to sign the key with our CA:
 
     $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
       -out server-cert.pem
+    Signature ok
+    subject=/CN=your.host.com
+    Getting CA Private Key
+    Enter pass phrase for ca-key.pem:
 
 For client authentication, create a client key and certificate signing
 request:
 
-    $ openssl genrsa -des3 -out client-key.pem
-    $ openssl req -new -key client-key.pem -out client.csr
+    $ openssl genrsa -des3 -out key.pem 2048
+    Generating RSA private key, 2048 bit long modulus
+    ...............................................+++
+    ...............................................................+++
+    e is 65537 (0x10001)
+    Enter pass phrase for key.pem:
+    Verifying - Enter pass phrase for key.pem:
+    $ openssl req -subj '/CN=client' -new -key key.pem -out client.csr
+    Enter pass phrase for key.pem:
 
-To make the key suitable for client authentication, create a extensions
+To make the key suitable for client authentication, create an extensions
 config file:
 
     $ echo extendedKeyUsage = clientAuth > extfile.cnf
@@ -55,34 +98,57 @@
 Now sign the key:
 
     $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \
-      -out client-cert.pem -extfile extfile.cnf
+      -out cert.pem -extfile extfile.cnf
+    Signature ok
+    subject=/CN=client
+    Getting CA Private Key
+    Enter pass phrase for ca-key.pem:
 
-Finally you need to remove the passphrase from the client and server
-key:
+Finally, you need to remove the passphrase from the client and server key:
 
     $ openssl rsa -in server-key.pem -out server-key.pem
-    $ openssl rsa -in client-key.pem -out client-key.pem
+    Enter pass phrase for server-key.pem:
+    writing RSA key
+    $ openssl rsa -in key.pem -out key.pem
+    Enter pass phrase for key.pem:
+    writing RSA key
 
 Now you can make the Docker daemon only accept connections from clients
 providing a certificate trusted by our CA:
 
     $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
-      -H=0.0.0.0:2375
+      -H=0.0.0.0:2376
 
 To be able to connect to Docker and validate its certificate, you now
 need to provide your client keys, certificates and trusted CA:
 
-    $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
-      -H=dns-name-of-docker-host:2375
+    $ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \
+      -H=dns-name-of-docker-host:2376 version
+
+> **Note**:
+> Docker over TLS should run on TCP port 2376.
 
 > **Warning**: 
-> As shown in the example above, you don't have to run the
-> `docker` client with `sudo` or
-> the `docker` group when you use certificate
-> authentication. That means anyone with the keys can give any
-> instructions to your Docker daemon, giving them root access to the
-> machine hosting the daemon. Guard these keys as you would a root
-> password!
+> As shown in the example above, you don't have to run the `docker` client
+> with `sudo` or the `docker` group when you use certificate authentication.
+> That means anyone with the keys can give any instructions to your Docker
+> daemon, giving them root access to the machine hosting the daemon. Guard
+> these keys as you would a root password!
+
+## Secure by default
+
+If you want to secure your Docker client connections by default, you can move 
+the files to the `.docker` directory in your home directory - and set the
+`DOCKER_HOST` variable as well.
+
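+    $ mkdir -p ~/.docker    # in case ~/.docker does not already exist (assumption)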
+    $ cp ca.pem ~/.docker/ca.pem
+    $ cp cert.pem ~/.docker/cert.pem
+    $ cp key.pem ~/.docker/key.pem
+    $ export DOCKER_HOST=tcp://:2376
+
+Then you can run Docker with the `--tlsverify` option.
+
+    $ docker --tlsverify ps
 
 ## Other modes
 
@@ -91,17 +157,22 @@
 
 ### Daemon modes
 
- - tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients
- - tls, tlscert, tlskey: Do not authenticate clients
+ - `tlsverify`, `tlscacert`, `tlscert`, `tlskey` set: Authenticate clients
+ - `tls`, `tlscert`, `tlskey`: Do not authenticate clients
 
 ### Client modes
 
- - tls: Authenticate server based on public/default CA pool
- - tlsverify, tlscacert: Authenticate server based on given CA
- - tls, tlscert, tlskey: Authenticate with client certificate, do not
+ - `tls`: Authenticate server based on public/default CA pool
+ - `tlsverify`, `tlscacert`: Authenticate server based on given CA
+ - `tls`, `tlscert`, `tlskey`: Authenticate with client certificate, do not
    authenticate server based on given CA
- - tlsverify, tlscacert, tlscert, tlskey: Authenticate with client
-   certificate, authenticate server based on given CA
+ - `tlsverify`, `tlscacert`, `tlscert`, `tlskey`: Authenticate with client
+   certificate and authenticate server based on given CA
 
-The client will send its client certificate if found, so you just need
-to drop your keys into ~/.docker/<ca, cert or key>.pem
+If found, the client will send its client certificate, so you just need
+to drop your keys into `~/.docker/<ca, cert or key>.pem`. Alternatively,
+if you want to store your keys in another location, you can specify that
+location using the environment variable `DOCKER_CERT_PATH`.
+
+    $ export DOCKER_CERT_PATH=${HOME}/.docker/zone1/
+    $ docker --tlsverify ps
diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md
index bf46b90..f9aa2d2 100644
--- a/docs/sources/articles/networking.md
+++ b/docs/sources/articles/networking.md
@@ -170,12 +170,41 @@
 the `/etc/resolv.conf` of the host machine where the `docker` daemon is
 running.  The options then modify this default configuration.
 
+## Communication between containers and the wider world
+
+<a name="the-world"></a>
+
+Whether a container can talk to the world is governed by one main factor.
+
+Is the host machine willing to forward IP packets?  This is governed
+by the `ip_forward` system parameter.  Packets can only pass between
+containers if this parameter is `1`.  Usually you will simply leave
+the Docker server at its default setting `--ip-forward=true` and
+Docker will go set `ip_forward` to `1` for you when the server
+starts up.  To check the setting or turn it on manually:
+
+    # Usually not necessary: turning on forwarding,
+    # on the host where your Docker server is running
+
+    $ cat /proc/sys/net/ipv4/ip_forward
+    0
+    $ sudo sh -c 'echo 1 > /proc/sys/net/ipv4/ip_forward'
+    $ cat /proc/sys/net/ipv4/ip_forward
+    1
+
+Most users of Docker will want `ip_forward` on, to at least make
+communication *possible* between containers and the wider world.
+
+Forwarding may also be needed for inter-container communication if you
+are running a multiple-bridge setup.
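+
+To make forwarding persist across reboots, you can record the setting with
+`sysctl` (a sketch; the exact configuration file varies by distribution):
+
+    $ echo "net.ipv4.ip_forward = 1" | sudo tee -a /etc/sysctl.conf
+    $ sudo sysctl -p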
+
 ## Communication between containers
 
 <a name="between-containers"></a>
 
 Whether two containers can communicate is governed, at the operating
-system level, by three factors.
+system level, by two factors.
 
 1.  Does the network topology even connect the containers' network
     interfaces?  By default Docker will attach all containers to a
@@ -183,32 +212,14 @@
     between them.  See the later sections of this document for other
     possible topologies.
 
-2.  Is the host machine willing to forward IP packets?  This is governed
-    by the `ip_forward` system parameter.  Packets can only pass between
-    containers if this parameter is `1`.  Usually you will simply leave
-    the Docker server at its default setting `--ip-forward=true` and
-    Docker will go set `ip_forward` to `1` for you when the server
-    starts up.  To check the setting or turn it on manually:
-
-        # Usually not necessary: turning on forwarding,
-        # on the host where your Docker server is running
-
-        $ cat /proc/sys/net/ipv4/ip_forward
-        0
-        $ sudo echo 1 > /proc/sys/net/ipv4/ip_forward
-        $ cat /proc/sys/net/ipv4/ip_forward
-        1
-
-3.  Do your `iptables` allow this particular connection to be made?
+2.  Do your `iptables` allow this particular connection to be made?
     Docker will never make changes to your system `iptables` rules if
     you set `--iptables=false` when the daemon starts.  Otherwise the
     Docker server will add a default rule to the `FORWARD` chain with a
     blanket `ACCEPT` policy if you retain the default `--icc=true`, or
     else will set the policy to `DROP` if `--icc=false`.
 
-Nearly everyone using Docker will want `ip_forward` to be on, to at
-least make communication *possible* between containers.  But it is a
-strategic question whether to leave `--icc=true` or change it to
+It is a strategic question whether to leave `--icc=true` or change it to
 `--icc=false` (on Ubuntu, by editing the `DOCKER_OPTS` variable in
 `/etc/default/docker` and restarting the Docker server) so that
 `iptables` will protect other containers — and the main host — from
@@ -539,7 +550,7 @@
     It also allows the container to access local network services
     like D-bus.  This can lead to processes in the container being
     able to do unexpected things like
-    [restart your computer](https://github.com/dotcloud/docker/issues/6401).
+    [restart your computer](https://github.com/docker/docker/issues/6401).
     You should use this option with caution.
 
  *  `--net=container:NAME_or_ID` — Tells Docker to put this container's
@@ -720,3 +731,14 @@
 that drive you to such a solution, it is probably far preferable to use
 `--icc=false` to lock down inter-container communication, as we explored
 earlier.
+
+## Editing networking config files
+
+Starting with Docker v1.2.0, you can now edit `/etc/hosts`, `/etc/hostname`
+and `/etc/resolv.conf` in a running container. This is useful if you need
+to install BIND or other services that might override one of those files.
+
+Note, however, that changes to these files will not be saved by
+`docker commit`, nor will they be saved during `docker run`.
+That means they won't be saved in the image, nor will they persist when a
+container is restarted; they will only "stick" in a running container.
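+
+For example (a sketch; the container ID prompt and the address are hypothetical):
+
+    $ docker run -it ubuntu bash
+    root@f38c87f2a42d:/# echo "203.0.113.10  db.internal" >> /etc/hosts
+
+The new entry takes effect immediately in the running container, but it will
+be gone after a `docker commit` or a restart, as described above.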
diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md
index 9c871a2..b78de24 100644
--- a/docs/sources/articles/runmetrics.md
+++ b/docs/sources/articles/runmetrics.md
@@ -363,9 +363,9 @@
 - Execute `ip netns exec <somename> ....`
 
 Please review [*Enumerating Cgroups*](#enumerating-cgroups) to learn how to find
-the cgroup of a pprocess running in the container of which you want to
+the cgroup of a process running in the container of which you want to
 measure network usage. From there, you can examine the pseudo-file named
-`tasks`, which containes the PIDs that are in the
+`tasks`, which contains the PIDs that are in the
 control group (i.e. in the container). Pick any one of them.
 
 Putting everything together, if the "short ID" of a container is held in
diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md
index dcc61f3..12f7b35 100644
--- a/docs/sources/articles/security.md
+++ b/docs/sources/articles/security.md
@@ -196,7 +196,7 @@
 This won't affect regular web apps; but malicious users will find that
 the arsenal at their disposal has shrunk considerably! By default Docker
 drops all capabilities except [those
-needed](https://github.com/dotcloud/docker/blob/master/daemon/execdriver/native/template/default_template.go),
+needed](https://github.com/docker/docker/blob/master/daemon/execdriver/native/template/default_template.go),
 a whitelist instead of a blacklist approach. You can see a full list of
 available capabilities in [Linux
 manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html).
@@ -204,7 +204,7 @@
 Of course, you can always enable extra capabilities if you really need
 them (for instance, if you want to use a FUSE-based filesystem), but by
 default, Docker containers use only a
-[whitelist](https://github.com/dotcloud/docker/blob/master/daemon/execdriver/native/template/default_template.go)
+[whitelist](https://github.com/docker/docker/blob/master/daemon/execdriver/native/template/default_template.go)
 of kernel capabilities by default.
 
 ## Other Kernel Security Features
diff --git a/docs/sources/articles/using_supervisord.md b/docs/sources/articles/using_supervisord.md
index 91b8976..10f32c7 100644
--- a/docs/sources/articles/using_supervisord.md
+++ b/docs/sources/articles/using_supervisord.md
@@ -28,18 +28,14 @@
 
     FROM ubuntu:13.04
     MAINTAINER examples@docker.com
-    RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
-    RUN apt-get update
-    RUN apt-get upgrade -y
 
 ## Installing Supervisor
 
 We can now install our SSH and Apache daemons as well as Supervisor in
 our container.
 
-    RUN apt-get install -y openssh-server apache2 supervisor
-    RUN mkdir -p /var/run/sshd
-    RUN mkdir -p /var/log/supervisor
+    RUN apt-get update && apt-get install -y openssh-server apache2 supervisor
+    RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /var/log/supervisor
 
 Here we're installing the `openssh-server`,
 `apache2` and `supervisor`
@@ -52,7 +48,7 @@
 called `supervisord.conf` and is located in
 `/etc/supervisor/conf.d/`.
 
-    ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+    COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
 
 Let's see what is inside our `supervisord.conf`
 file.
diff --git a/docs/sources/contributing/contributing.md b/docs/sources/contributing/contributing.md
index dd764eb..7d65a04 100644
--- a/docs/sources/contributing/contributing.md
+++ b/docs/sources/contributing/contributing.md
@@ -7,18 +7,18 @@
 Want to hack on Docker? Awesome!
 
 The repository includes [all the instructions you need to get started](
-https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
+https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
 
 The [developer environment Dockerfile](
-https://github.com/dotcloud/docker/blob/master/Dockerfile)
+https://github.com/docker/docker/blob/master/Dockerfile)
 specifies the tools and versions used to test and build Docker.
 
 If you're making changes to the documentation, see the [README.md](
-https://github.com/dotcloud/docker/blob/master/docs/README.md).
+https://github.com/docker/docker/blob/master/docs/README.md).
 
 The [documentation environment Dockerfile](
-https://github.com/dotcloud/docker/blob/master/docs/Dockerfile)
+https://github.com/docker/docker/blob/master/docs/Dockerfile)
 specifies the tools and versions used to build the Documentation.
 
 Further interesting details can be found in the [Packaging hints](
-https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md).
+https://github.com/docker/docker/blob/master/hack/PACKAGERS.md).
diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md
index 606f930..25a80af 100644
--- a/docs/sources/contributing/devenvironment.md
+++ b/docs/sources/contributing/devenvironment.md
@@ -32,7 +32,7 @@
 
 ## Check out the Source
 
-    $ git clone https://git@github.com/dotcloud/docker
+    $ git clone https://git@github.com/docker/docker
     $ cd docker
 
 To checkout a different revision just use `git checkout`
@@ -110,7 +110,7 @@
     === RUN TestDependencyGraph
     --- PASS: TestDependencyGraph (0.00 seconds)
     PASS
-    ok      github.com/dotcloud/docker/utils        0.017s
+    ok      github.com/docker/docker/utils        0.017s
 
 If $TESTFLAGS is set in the environment, it is passed as extra arguments
 to `go test`. You can use this to select certain tests to run, e.g.,
diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md
index 1d16353..e3e2139 100644
--- a/docs/sources/docker-hub/builds.md
+++ b/docs/sources/docker-hub/builds.md
@@ -1,65 +1,75 @@
 page_title: Automated Builds on Docker Hub
 page_description: Docker Hub Automated Builds
 page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, trusted, builds, trusted builds, automated builds
+
 # Automated Builds on Docker Hub
 
-## Automated Builds
+## About Automated Builds
 
-*Automated Builds* is a special feature allowing you to specify a source
-repository with a `Dockerfile` to be built by the
-[Docker Hub](https://hub.docker.com) build clusters. The system will
-clone your repository and build the `Dockerfile` using the repository as
-the context. The resulting image will then be uploaded to the registry
+*Automated Builds* are a special feature of Docker Hub which allow you to
+use [Docker Hub's](https://hub.docker.com) build clusters to automatically
+create images from a specified `Dockerfile` and a GitHub or Bitbucket repo
+(or "context"). The system will clone your repository and build the image
+described by the `Dockerfile` using the repository as the context. The
+resulting automated image will then be uploaded to the Docker Hub registry
 and marked as an *Automated Build*.
 
-Automated Builds have a number of advantages. For example, users of
-*your* Automated Build can be certain that the resulting image was built
-exactly how it claims to be.
+Automated Builds have several advantages:
 
-Furthermore, the `Dockerfile` will be available to anyone browsing your repository
-on the registry. Another advantage of the Automated Builds feature is the automated
-builds. This makes sure that your repository is always up to date.
+* Users of *your* Automated Build can trust that the resulting
+image was built exactly as specified.
+
+* The `Dockerfile` will be available to anyone with access to
+your repository on the Docker Hub registry. 
+
+* Because the process is automated, Automated Builds help to
+make sure that your repository is always up to date.
 
 Automated Builds are supported for both public and private repositories
-on both [GitHub](http://github.com) and
-[BitBucket](https://bitbucket.org/).
+on both [GitHub](http://github.com) and [Bitbucket](https://bitbucket.org/).
 
-### Setting up Automated Builds with GitHub
+To use Automated Builds, you must have an [account on Docker Hub](
+http://docs.docker.com/userguide/dockerhub/#creating-a-docker-hub-account)
+and on GitHub and/or Bitbucket. In either case, the account needs
+to be properly validated and activated before you can link to it.
 
-In order to setup an Automated Build, you need to first link your
-[Docker Hub](https://hub.docker.com) account with a GitHub one. This
-will allow the registry to see your repositories.
+## Setting up Automated Builds with GitHub
+
+In order to set up an Automated Build, you need to first link your
+[Docker Hub](https://hub.docker.com) account with a GitHub account.
+This will allow the registry to see your repositories.
 
 > *Note:* 
-> We currently request access for *read* and *write* since
+> Automated Builds currently require *read* and *write* access since
 > [Docker Hub](https://hub.docker.com) needs to setup a GitHub service
-> hook. Although nothing else is done with your account, this is how
-> GitHub manages permissions, sorry!
+> hook. We have no choice here; this is how GitHub manages permissions, sorry!
+> We do guarantee nothing else will be touched in your account.
 
-Click on the [Automated Builds
-tab](https://registry.hub.docker.com/builds/) to get started and then
-select [+ Add New](https://registry.hub.docker.com/builds/add/).
+To get started, log into your Docker Hub account and click the
+"+ Add Repository" button at the upper right of the screen. Then select
+[Automated Build](https://registry.hub.docker.com/builds/add/).
 
 Select the [GitHub service](https://registry.hub.docker.com/associate/github/).
 
-Then follow the instructions to authorize and link your GitHub account
-to Docker Hub.
+Then follow the onscreen instructions to authorize and link your
+GitHub account to Docker Hub. Once it is linked, you'll be able to
+choose a repo from which to create the Automated Build.
 
-#### Creating an Automated Build
+### Creating an Automated Build
 
-You can [create an Automated Build](https://registry.hub.docker.com/builds/github/select/)
-from any of your public or private GitHub repositories with a `Dockerfile`.
+You can [create an Automated Build](
+https://registry.hub.docker.com/builds/github/select/) from any of your
+public or private GitHub repositories with a `Dockerfile`.
 
-#### GitHub organizations
+### GitHub Submodules
 
-GitHub organizations appear once your membership to that organization is
-made public on GitHub. To verify, you can look at the members tab for your
-organization on GitHub.
+If your GitHub repository contains links to private submodules, you'll
+need to add a deploy key from your Docker Hub repository. 
 
-#### GitHub service hooks
-
-You can follow the below steps to configure the GitHub service hooks for your
-Automated Build:
+Your Docker Hub deploy key is located under the "Build Details"
+menu on the Automated Build's main page in the Hub. Add this key
+to your GitHub submodule by visiting the Settings page for the
+repository on GitHub and selecting "Deploy keys".
 
 <table class="table table-bordered">
   <thead>
@@ -72,77 +82,153 @@
   <tbody>
     <tr>
       <td>1.</td>
-      <td><img src="https://d207aa93qlcgug.cloudfront.net/0.8/img/github_settings.png"></td>
-      <td>Login to Github.com, and visit your Repository page. Click on the repository "Settings" link. You will need admin rights to the repository in order to do this. So if you don't have admin rights, you will need to ask someone who does.</td>
+      <td><img src="/docker-hub/hub-images/deploy_key.png"></td>
+      <td>Your automated build's deploy key is in the "Build Details" menu 
+under "Deploy keys".</td>
     </tr>
     <tr>
       <td>2.</td>
-      <td><img src="https://d207aa93qlcgug.cloudfront.net/0.8/img/github_service_hooks.png" alt="Service Hooks"></td>
-      <td>Click on the "Service Hooks" link</td></tr><tr><td>3.</td><td><img src="https://d207aa93qlcgug.cloudfront.net/0.8/img/github_docker_service_hook.png" alt="Find the service hook labeled Docker"></td><td>Find the service hook labeled "Docker" and click on it.</td></tr><tr><td>4.</td><td><img src="https://d207aa93qlcgug.cloudfront.net/0.8/img/github_service_hook_docker_activate.png" alt="Activate Service Hooks"></td>
-      <td>Click on the "Active" checkbox and then the "Update settings" button, to save changes.</td>
+      <td><img src="/docker-hub/hub-images/github_deploy_key.png"></td>
+      <td>In your GitHub submodule's repository Settings page, add the 
+deploy key from your Docker Hub Automated Build.</td>
+    </tr>
+  </tbody>
+</table>
+
+### GitHub Organizations
+
+GitHub organizations will appear once your membership to that organization is
+made public on GitHub. To verify, you can look at the members tab for your
+organization on GitHub.
+
+### GitHub Service Hooks
+
+Follow the steps below to configure the GitHub service
+hooks for your Automated Build:
+
+<table class="table table-bordered">
+  <thead>
+    <tr>
+      <th>Step</th>
+      <th>Screenshot</th>
+      <th>Description</th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td>1.</td>
+      <td><img src="/docker-hub/hub-images/gh_settings.png"></td>
+      <td>Log in to Github.com, and go to your Repository page. Click on "Settings" on
+      the right side of the page. You must have admin privileges to the repository in order to do this.</td>
+    </tr>
+    <tr>
+      <td>2.</td>
+      <td><img src="/docker-hub/hub-images/gh_menu.png" alt="Webhooks & Services"></td>
+      <td>Click on "Webhooks & Services" on the left side of the page.</td></tr>
+      <tr><td>3.</td>
+      <td><img src="/docker-hub/hub-images/gh_service_hook.png" alt="Find the service labeled Docker"></td><td>Find the service labeled "Docker" and click on it.</td></tr>
+      <tr><td>4.</td><td><img src="/docker-hub/hub-images/gh_docker-service.png" alt="Activate Service Hooks"></td>
+      <td>Make sure the "Active" checkbox is selected and click the "Update service" button to save your changes.</td>
     </tr>
   </tbody>
 </table>
 
-### Setting up Automated Builds with BitBucket
+## Setting up Automated Builds with Bitbucket
 
 In order to setup an Automated Build, you need to first link your
-[Docker Hub](https://hub.docker.com) account with a BitBucket one. This
-will allow the registry to see your repositories.
+[Docker Hub](https://hub.docker.com) account with a Bitbucket account.
+This will allow the registry to see your repositories.
 
-Click on the [Automated Builds tab](https://registry.hub.docker.com/builds/) to
-get started and then select [+ Add
-New](https://registry.hub.docker.com/builds/add/).
+To get started, log into your Docker Hub account and click the
+"+ Add Repository" button at the upper right of the screen. Then
+select [Automated Build](https://registry.hub.docker.com/builds/add/).
 
-Select the [BitBucket
-service](https://registry.hub.docker.com/associate/bitbucket/).
+Select the [Bitbucket source](
+https://registry.hub.docker.com/associate/bitbucket/).
 
-Then follow the instructions to authorize and link your BitBucket account
-to Docker Hub.
+Then follow the onscreen instructions to authorize and link your
+Bitbucket account to Docker Hub. Once it is linked, you'll be able
+to choose a repo from which to create the Automated Build.
 
-#### Creating an Automated Build
+### Creating an Automated Build
 
 You can [create an Automated Build](
 https://registry.hub.docker.com/builds/bitbucket/select/) from any of your
-public or private BitBucket repositories with a `Dockerfile`.
+public or private Bitbucket repositories with a `Dockerfile`.
 
-### The Dockerfile and Automated Builds
+### Adding a Hook
 
-During the build process, we copy the contents of your `Dockerfile`. We also
-add it to the [Docker Hub](https://hub.docker.com) for the Docker community
+When you link your Docker Hub account, a `POST` hook should get automatically
+added to your Bitbucket repo. Follow the steps below to confirm or modify the
+Bitbucket hooks for your Automated Build:
+
+<table class="table table-bordered">
+  <thead>
+    <tr>
+      <th>Step</th>
+      <th>Screenshot</th>
+      <th>Description</th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <td>1.</td>
+      <td><img src="/docker-hub/hub-images/bb_menu.png" alt="Settings" width="180"></td>
+      <td>Log in to Bitbucket.org and go to your Repository page. Click on "Settings" on
+      the far left side of the page, under "Navigation". You must have admin privileges
+      to the repository in order to do this.</td>
+    </tr>
+    <tr>
+      <td>2.</td>
+      <td><img src="/docker-hub/hub-images/bb_hooks.png" alt="Hooks" width="180"></td>
+      <td>Click on "Hooks" on the near left side of the page, under "Settings".</td></tr>
+    <tr>
+      <td>3.</td>
+      <td><img src="/docker-hub/hub-images/bb_post-hook.png" alt="Docker Post Hook"></td><td>You should now see a list of hooks associated with the repo, including a <code>POST</code> hook that points at
+      registry.hub.docker.com/hooks/bitbucket.</td>
+    </tr>
+  </tbody>
+</table>
+
+
+## The Dockerfile and Automated Builds
+
+During the build process, Docker will copy the contents of your `Dockerfile`.
+It will also add it to the [Docker Hub](https://hub.docker.com) for the Docker
+community (for public repos) or approved team members/orgs (for private repos)
 to see on the repository page.
 
-### README.md
+## README.md
 
-If you have a `README.md` file in your repository, we will use that as the
-repository's full description.
+If you have a `README.md` file in your repository, it will be used as the
+repository's full description. The build process will look for a
+`README.md` in the same directory as your `Dockerfile`.
 
 > **Warning:**
 > If you change the full description after a build, it will be
 > rewritten the next time the Automated Build has been built. To make changes,
-> modify the README.md from the Git repository. We will look for a README.md
-> in the same directory as your `Dockerfile`.
+> modify the `README.md` from the Git repository.
 
 ### Build triggers
 
-If you need another way to trigger your Automated Builds outside of GitHub
-or BitBucket, you can setup a build trigger. When you turn on the build
-trigger for an Automated Build, it will give you a URL to which you can
-send POST requests. This will trigger the Automated Build process, which
-is similar to GitHub webhooks.
+If you need a way to trigger Automated Builds outside of GitHub or Bitbucket,
+you can set up a build trigger. When you turn on the build trigger for an
+Automated Build, it will give you a URL to which you can send POST requests.
+This will trigger the Automated Build, much as with a GitHub webhook.
 
-Build Triggers are available under the Settings tab of each Automated Build.
+Build triggers are available under the Settings menu of each Automated Build
+repo on the Docker Hub.
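+
+For example, you can fire a trigger with a plain `curl` POST (the URL below is
+a placeholder; use the exact trigger URL shown on your build's settings page):
+
+    $ curl --data "build=true" -X POST https://registry.hub.docker.com/u/<user>/<repo>/trigger/<token>/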
 
 > **Note:** 
 > You can only trigger one build at a time and no more than one
-> every five minutes. If you have a build already pending, or if you already
+> every five minutes. If you already have a build pending, or if you
 > recently submitted a build request, those requests *will be ignored*.
-> You can find the logs of last 10 triggers on the settings page to verify
-> if everything is working correctly.
+> To verify everything is working correctly, check the logs of the last
+> ten triggers on the settings page.
 
 ### Webhooks
 
-Also available for Automated Builds are Webhooks. Webhooks can be called
+Automated Builds also include a Webhooks feature. Webhooks can be called
 after a successful repository push is made.
 
 The webhook call will generate an HTTP POST with the following JSON
@@ -179,24 +265,25 @@
 }
 ```
 
-Webhooks are available under the Settings tab of each Automated
-Build.
+Webhooks are available under the Settings menu of each Automated
+Build's repo.
 
-> **Note:** If you want to test your webhook out then we recommend using
+> **Note:** If you want to test your webhook out we recommend using
 > a tool like [requestb.in](http://requestb.in/).
 
 
 ### Repository links
 
-Repository links are a way to associate one Automated Build with another. If one
-gets updated, linking system also triggers a build for the other Automated Build.
-This makes it easy to keep your Automated Builds up to date.
+Repository links are a way to associate one Automated Build with
+another. If one gets updated, the linking system triggers a rebuild
+for the other Automated Build. This makes it easy to keep all your
+Automated Builds up to date.
 
-To add a link, go to the settings page of an Automated Build and click on
-*Repository Links*. Then enter the name of the repository that you want have
-linked.
+To add a link, go to the repo for the Automated Build you want to
+link to and click on *Repository Links* under the Settings menu at
+right. Then, enter the name of the repository that you want to have linked.
 
 > **Warning:**
 > You can add more than one repository link; however, you should
-> be very careful. Creating a two way relationship between Automated Builds will
-> cause a never ending build loop.
+> do so very carefully. Creating a two-way relationship between Automated Builds will
+> cause an endless build loop.
diff --git a/docs/sources/docker-hub/hub-images/bb_hooks.png b/docs/sources/docker-hub/hub-images/bb_hooks.png
new file mode 100644
index 0000000..d51cd03
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/bb_hooks.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/bb_menu.png b/docs/sources/docker-hub/hub-images/bb_menu.png
new file mode 100644
index 0000000..6f4a681
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/bb_menu.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/bb_post-hook.png b/docs/sources/docker-hub/hub-images/bb_post-hook.png
new file mode 100644
index 0000000..78c4730
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/bb_post-hook.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/deploy_key.png b/docs/sources/docker-hub/hub-images/deploy_key.png
new file mode 100644
index 0000000..c4377bb
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/deploy_key.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/gh_docker-service.png b/docs/sources/docker-hub/hub-images/gh_docker-service.png
new file mode 100644
index 0000000..0119b9e
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/gh_docker-service.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/gh_menu.png b/docs/sources/docker-hub/hub-images/gh_menu.png
new file mode 100644
index 0000000..d9c8d11
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/gh_menu.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/gh_service_hook.png b/docs/sources/docker-hub/hub-images/gh_service_hook.png
new file mode 100644
index 0000000..9a00153
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/gh_service_hook.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/gh_settings.png b/docs/sources/docker-hub/hub-images/gh_settings.png
new file mode 100644
index 0000000..efb1a3a
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/gh_settings.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/github_deploy_key.png b/docs/sources/docker-hub/hub-images/github_deploy_key.png
new file mode 100644
index 0000000..bd69054
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/github_deploy_key.png
Binary files differ
diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md
index c219a19..8d76d5a 100644
--- a/docs/sources/docker-hub/repos.md
+++ b/docs/sources/docker-hub/repos.md
@@ -65,9 +65,9 @@
 > **Note:**
 > If you would like to contribute an official repository for your
 > organization, product or team you can see more information
-> [here](https://github.com/dotcloud/stackbrew).
+> [here](https://github.com/docker/stackbrew).
 
-## Private Docker Repositories
+## Private Repositories
 
 Private repositories allow you to have repositories that contain images
 that you want to keep private, either to your own account or within an
diff --git a/docs/sources/examples/apt-cacher-ng.Dockerfile b/docs/sources/examples/apt-cacher-ng.Dockerfile
index 3b7862b..d1f7657 100644
--- a/docs/sources/examples/apt-cacher-ng.Dockerfile
+++ b/docs/sources/examples/apt-cacher-ng.Dockerfile
@@ -9,7 +9,7 @@
 MAINTAINER	SvenDowideit@docker.com
 
 VOLUME		["/var/cache/apt-cacher-ng"]
-RUN		apt-get update ; apt-get install -yq apt-cacher-ng
+RUN		apt-get update && apt-get install -y apt-cacher-ng
 
 EXPOSE		3142
-CMD		chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*
+CMD		chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
diff --git a/docs/sources/examples/apt-cacher-ng.md b/docs/sources/examples/apt-cacher-ng.md
index 34e4a4b..7dafec1 100644
--- a/docs/sources/examples/apt-cacher-ng.md
+++ b/docs/sources/examples/apt-cacher-ng.md
@@ -28,10 +28,10 @@
     MAINTAINER  SvenDowideit@docker.com
 
     VOLUME      ["/var/cache/apt-cacher-ng"]
-    RUN     apt-get update ; apt-get install -yq apt-cacher-ng
+    RUN     apt-get update && apt-get install -y apt-cacher-ng
 
     EXPOSE      3142
-    CMD     chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*
+    CMD     chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
 
 Build the image using:
 
@@ -61,7 +61,7 @@
 
     FROM ubuntu
     RUN  echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
-    RUN apt-get update ; apt-get install vim git
+    RUN apt-get update && apt-get install -y vim git
 
     # docker build -t my_ubuntu .
 
diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md
index 602f55c..28f7824 100644
--- a/docs/sources/examples/mongodb.md
+++ b/docs/sources/examples/mongodb.md
@@ -65,13 +65,12 @@
 After this initial preparation we can update our packages and install MongoDB.
 
     # Update apt-get sources AND install MongoDB
-    RUN apt-get update
-    RUN apt-get install -y -q mongodb-org
+    RUN apt-get update && apt-get install -y mongodb-org
 
 > **Tip:** You can install a specific version of MongoDB by using a list
 > of required packages with versions, e.g.:
 > 
->     RUN apt-get install -y -q mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1
+>     RUN apt-get update && apt-get install -y mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1
 
 MongoDB requires a data directory. Let's create it as the final step of our
 installation instructions.
diff --git a/docs/sources/examples/mongodb/Dockerfile b/docs/sources/examples/mongodb/Dockerfile
index e7acc0f..9333eb5 100644
--- a/docs/sources/examples/mongodb/Dockerfile
+++ b/docs/sources/examples/mongodb/Dockerfile
@@ -11,8 +11,7 @@
 RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
 
 # Update apt-get sources AND install MongoDB
-RUN apt-get update
-RUN apt-get install -y -q mongodb-org
+RUN apt-get update && apt-get install -y mongodb-org
 
 # Create the MongoDB data directory
 RUN mkdir -p /data/db
@@ -20,5 +19,5 @@
 # Expose port #27017 from the container to the host
 EXPOSE 27017
 
-# Set usr/bin/mongod as the dockerized entry-point application
-ENTRYPOINT usr/bin/mongod
+# Set /usr/bin/mongod as the dockerized entry-point application
+ENTRYPOINT ["/usr/bin/mongod"]
diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md
index a7b8eea..5d69fd7 100644
--- a/docs/sources/examples/nodejs_web_app.md
+++ b/docs/sources/examples/nodejs_web_app.md
@@ -66,10 +66,10 @@
 
 Next, define the parent image you want to use to build your own image on
 top of. Here, we'll use
-[CentOS](https://registry.hub.docker.com/_/centos/) (tag: `6.4`)
+[CentOS](https://registry.hub.docker.com/_/centos/) (tag: `centos6`)
 available on the [Docker Hub](https://hub.docker.com/):
 
-    FROM    centos:6.4
+    FROM    centos:centos6
 
 Since we're building a Node.js app, you'll have to install Node.js as
 well as npm on your CentOS image. Node.js is required to run your app
@@ -84,11 +84,11 @@
     # Install Node.js and npm
     RUN     yum install -y npm
 
-To bundle your app's source code inside the Docker image, use the `ADD`
+To bundle your app's source code inside the Docker image, use the `COPY`
 instruction:
 
     # Bundle app source
-    ADD . /src
+    COPY . /src
 
 Install your app dependencies using the `npm` binary:
 
@@ -109,7 +109,7 @@
 Your `Dockerfile` should now look like this:
 
     # DOCKER-VERSION 0.3.4
-    FROM    centos:6.4
+    FROM    centos:centos6
 
     # Enable EPEL for Node.js
     RUN     rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
@@ -117,7 +117,7 @@
     RUN     yum install -y npm
 
     # Bundle app source
-    ADD . /src
+    COPY . /src
     # Install app dependencies
     RUN cd /src; npm install
 
@@ -127,7 +127,7 @@
 ## Building your image
 
 Go to the directory that has your `Dockerfile` and run the following command
-to build a Docker image. The `-t` flag let's you tag your image so it's easier
+to build a Docker image. The `-t` flag lets you tag your image so it's easier
 to find later using the `docker images` command:
 
     $ sudo docker build -t <your username>/centos-node-hello .
@@ -137,9 +137,9 @@
     $ sudo docker images
 
     # Example
-    REPOSITORY                            TAG       ID              CREATED
-    centos                                6.4       539c0211cd76    8 weeks ago
-    <your username>/centos-node-hello     latest    d64d3505b0d2    2 hours ago
+    REPOSITORY                          TAG        ID              CREATED
+    centos                              centos6    539c0211cd76    8 weeks ago
+    <your username>/centos-node-hello   latest     d64d3505b0d2    2 hours ago
 
 ## Run the image
 
diff --git a/docs/sources/examples/postgresql_service.Dockerfile b/docs/sources/examples/postgresql_service.Dockerfile
index 364a18a..d0f3766 100644
--- a/docs/sources/examples/postgresql_service.Dockerfile
+++ b/docs/sources/examples/postgresql_service.Dockerfile
@@ -13,17 +13,13 @@
 #     of PostgreSQL, ``9.3``.
 RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
 
-# Update the Ubuntu and PostgreSQL repository indexes
-RUN apt-get update
-
 # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3
 #  There are some warnings (in red) that show up during the build. You can hide
 #  them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive
-RUN apt-get -y -q install python-software-properties software-properties-common
-RUN apt-get -y -q install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
+RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
 
 # Note: The official Debian and Ubuntu images automatically ``apt-get clean``
-# after each ``apt-get`` 
+# after each ``apt-get``
 
 # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed``
 USER postgres
diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md
index 5265935..ffd122e 100644
--- a/docs/sources/examples/postgresql_service.md
+++ b/docs/sources/examples/postgresql_service.md
@@ -35,17 +35,13 @@
     #     of PostgreSQL, ``9.3``.
     RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
 
-    # Update the Ubuntu and PostgreSQL repository indexes
-    RUN apt-get update
-
     # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3
     #  There are some warnings (in red) that show up during the build. You can hide
     #  them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive
-    RUN apt-get -y -q install python-software-properties software-properties-common
-    RUN apt-get -y -q install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
+    RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
 
     # Note: The official Debian and Ubuntu images automatically ``apt-get clean``
-    # after each ``apt-get`` 
+    # after each ``apt-get``
 
     # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed``
     USER postgres
@@ -88,7 +84,7 @@
 
 > **Note**: 
 > The `--rm` removes the container and its image when
-> the container exists successfully.
+> the container exits successfully.
 
 ### Using container linking
 
diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md
index 0eeef06..6d052da 100644
--- a/docs/sources/examples/running_redis_service.md
+++ b/docs/sources/examples/running_redis_service.md
@@ -13,8 +13,7 @@
 image.
 
     FROM        ubuntu:12.10
-    RUN         apt-get update
-    RUN         apt-get -y install redis-server
+    RUN         apt-get update && apt-get install -y redis-server
     EXPOSE      6379
     ENTRYPOINT  ["/usr/bin/redis-server"]
 
@@ -49,9 +48,9 @@
 Once inside our freshly created container we need to install Redis to
 get the `redis-cli` binary to test our connection.
 
-    $ apt-get update
-    $ apt-get -y install redis-server
-    $ service redis-server stop
+    $ sudo apt-get update
+    $ sudo apt-get install redis-server
+    $ sudo service redis-server stop
 
 As we've used the `--link redis:db` option, Docker
 has created some environment variables in our web application container.
diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md
index 5909b7e..c3d83bf 100644
--- a/docs/sources/examples/running_riak_service.md
+++ b/docs/sources/examples/running_riak_service.md
@@ -14,7 +14,7 @@
     $ touch Dockerfile
 
 Next, define the parent image you want to use to build your image on top
-of. We'll use [Ubuntu](https://registry.hub.docker.cm/_/ubuntu/) (tag:
+of. We'll use [Ubuntu](https://registry.hub.docker.com/_/ubuntu/) (tag:
 `latest`), which is available on [Docker Hub](https://hub.docker.com):
 
     # Riak
@@ -25,13 +25,6 @@
     FROM ubuntu:latest
     MAINTAINER Hector Castro hector@basho.com
 
-Next, we update the APT cache and apply any updates:
-
-    # Update the APT cache
-    RUN sed -i.bak 's/main$/main universe/' /etc/apt/sources.list
-    RUN apt-get update
-    RUN apt-get upgrade -y
-
 After that, we install and setup a few dependencies:
 
  - `curl` is used to download Basho's APT
@@ -46,38 +39,29 @@
 <!-- -->
 
     # Install and setup project dependencies
-    RUN apt-get install -y curl lsb-release supervisor openssh-server
+    RUN apt-get update && apt-get install -y curl lsb-release supervisor openssh-server
 
     RUN mkdir -p /var/run/sshd
     RUN mkdir -p /var/log/supervisor
 
     RUN locale-gen en_US en_US.UTF-8
 
-    ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+    COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
 
     RUN echo 'root:basho' | chpasswd
 
 Next, we add Basho's APT repository:
 
-    RUN curl -s http://apt.basho.com/gpg/basho.apt.key | apt-key add --
+    RUN curl -sSL http://apt.basho.com/gpg/basho.apt.key | apt-key add --
     RUN echo "deb http://apt.basho.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/basho.list
-    RUN apt-get update
 
 After that, we install Riak and alter a few defaults:
 
     # Install Riak and prepare it to run
-    RUN apt-get install -y riak
+    RUN apt-get update && apt-get install -y riak
     RUN sed -i.bak 's/127.0.0.1/0.0.0.0/' /etc/riak/app.config
     RUN echo "ulimit -n 4096" >> /etc/default/riak
 
-Almost there. Next, we add a hack to get us by the lack of
-`initctl`:
-
-    # Hack for initctl
-    # See: https://github.com/dotcloud/docker/issues/1024
-    RUN dpkg-divert --local --rename --add /sbin/initctl
-    RUN ln -s /bin/true /sbin/initctl
-
 Then, we expose the Riak Protocol Buffers and HTTP interfaces, along
 with SSH:
 
diff --git a/docs/sources/examples/running_ssh_service.Dockerfile b/docs/sources/examples/running_ssh_service.Dockerfile
index 57baf88..1b8ed02 100644
--- a/docs/sources/examples/running_ssh_service.Dockerfile
+++ b/docs/sources/examples/running_ssh_service.Dockerfile
@@ -5,10 +5,7 @@
 FROM    ubuntu:12.04
 MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com"
 
-# make sure the package repository is up to date
-RUN apt-get update
-
-RUN apt-get install -y openssh-server
+RUN apt-get update && apt-get install -y openssh-server
 RUN mkdir /var/run/sshd
 RUN echo 'root:screencast' |chpasswd
 
diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md
index 579d372..7140678 100644
--- a/docs/sources/examples/running_ssh_service.md
+++ b/docs/sources/examples/running_ssh_service.md
@@ -15,10 +15,7 @@
     FROM     ubuntu:12.04
     MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com"
 
-    # make sure the package repository is up to date
-    RUN apt-get update
-
-    RUN apt-get install -y openssh-server
+    RUN apt-get update && apt-get install -y openssh-server
     RUN mkdir /var/run/sshd
     RUN echo 'root:screencast' |chpasswd
 
@@ -27,7 +24,7 @@
 
 Build the image using:
 
-    $ sudo docker build --rm -t eg_sshd .
+    $ sudo docker build -t eg_sshd .
 
 Then run it. You can then use `docker port` to find out what host port
 the container's port 22 is mapped to:
diff --git a/docs/sources/faq.md b/docs/sources/faq.md
index 667058c..531afc3 100644
--- a/docs/sources/faq.md
+++ b/docs/sources/faq.md
@@ -14,8 +14,8 @@
 ### What open source license are you using?
 
 We are using the Apache License Version 2.0, see it here:
-[https://github.com/dotcloud/docker/blob/master/LICENSE](
-https://github.com/dotcloud/docker/blob/master/LICENSE)
+[https://github.com/docker/docker/blob/master/LICENSE](
+https://github.com/docker/docker/blob/master/LICENSE)
 
 ### Does Docker run on Mac OS X or Windows?
 
@@ -225,9 +225,41 @@
 except if you have no way to figure out that it contains a copy of the
 OpenSSL library vulnerable to the [Heartbleed](http://heartbleed.com/) bug.
 
+### Why is `DEBIAN_FRONTEND=noninteractive` discouraged in Dockerfiles?
+
+When building Docker images on Debian and Ubuntu you may have seen errors like:
+
+    unable to initialize frontend: Dialog
+
+These errors don't stop the image from being built. They inform you that the
+installation process tried to open a dialog box but was unable to.
+Generally, these errors are safe to ignore.
+
+Some people circumvent these errors by changing the `DEBIAN_FRONTEND` 
+environment variable inside the Dockerfile using:
+
+    ENV DEBIAN_FRONTEND noninteractive
+
+This prevents the installer from opening dialog boxes during installation 
+which stops the errors.
+
+While this may sound like a good idea, it *may* have side effects. 
+The `DEBIAN_FRONTEND` environment variable will be inherited by all 
+images and containers built from your image, effectively changing
+their behavior. People using those images will run into problems when
+installing software interactively, because installers will not show
+any dialog boxes.
+
+Because of this, and because setting `DEBIAN_FRONTEND` to `noninteractive` is
+mainly a 'cosmetic' change, we *discourage* changing it.
+
+If you *really* need to change its setting, make sure to change it
+back to its [default value](https://www.debian.org/releases/stable/i386/ch05s03.html.en) 
+afterwards.
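+
+If all you want is to avoid the dialog errors for a single command, a common
+alternative (a sketch, not part of the guidance above) is to scope the
+variable to that one `RUN` shell command, so nothing persists in the image:
+
+    RUN DEBIAN_FRONTEND=noninteractive apt-get install -y <your-package>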
+
 ### Can I help by adding some questions and answers?
 
-Definitely! You can fork [the repo](https://github.com/dotcloud/docker) and
+Definitely! You can fork [the repo](https://github.com/docker/docker) and
 edit the documentation sources.
 
 ### Where can I find more answers?
@@ -237,7 +269,7 @@
 - [Docker user mailing list](https://groups.google.com/d/forum/docker-user)
 - [Docker developer mailing list](https://groups.google.com/d/forum/docker-dev)
 - [IRC, docker on freenode](irc://chat.freenode.net#docker)
-- [GitHub](https://github.com/dotcloud/docker)
+- [GitHub](https://github.com/docker/docker)
 - [Ask questions on Stackoverflow](http://stackoverflow.com/search?q=docker)
 - [Join the conversation on Twitter](http://twitter.com/docker)
 
diff --git a/docs/sources/index.md b/docs/sources/index.md
index 75414b4..5267557 100644
--- a/docs/sources/index.md
+++ b/docs/sources/index.md
@@ -94,7 +94,7 @@
 *`.dockerignore` support*
 
 You can now add a `.dockerignore` file next to your `Dockerfile` and Docker will ignore files and directories specified in that file when sending the build context to the daemon. 
-Example: https://github.com/dotcloud/docker/blob/master/.dockerignore
+Example: https://github.com/docker/docker/blob/master/.dockerignore
 
 *Pause containers during commit*
 
diff --git a/docs/sources/installation/MAINTAINERS b/docs/sources/installation/MAINTAINERS
index 6a2f512..aca1397 100644
--- a/docs/sources/installation/MAINTAINERS
+++ b/docs/sources/installation/MAINTAINERS
@@ -1 +1,2 @@
 google.md: Johan Euphrosine <proppy@google.com> (@proppy)
+softlayer.md: Phil Jackson <underscorephil@gmail.com> (@underscorephil)
diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md
index f6eb44f..8e35d98 100644
--- a/docs/sources/installation/binaries.md
+++ b/docs/sources/installation/binaries.md
@@ -23,9 +23,9 @@
  - a [properly mounted](
    https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
    cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount
-   point [is](https://github.com/dotcloud/docker/issues/2683)
-   [not](https://github.com/dotcloud/docker/issues/3485)
-   [sufficient](https://github.com/dotcloud/docker/issues/4568))
+   point [is](https://github.com/docker/docker/issues/2683)
+   [not](https://github.com/docker/docker/issues/3485)
+   [sufficient](https://github.com/docker/docker/issues/4568))
 
 ## Check kernel dependencies
 
diff --git a/docs/sources/installation/centos.md b/docs/sources/installation/centos.md
index 3966d0f..b919ca5 100644
--- a/docs/sources/installation/centos.md
+++ b/docs/sources/installation/centos.md
@@ -4,23 +4,31 @@
 
 # CentOS
 
-The Docker package is available via the EPEL repository. These
-instructions work for CentOS 6 and later. They will likely work for
+While the Docker package is provided by default as part of CentOS-7,
+it is provided by a community repository for CentOS-6. Please note that
+the installation instructions differ slightly between these versions.
+
+These instructions work for CentOS 6 and later. They will likely work for
 other binary compatible EL6 distributions such as Scientific Linux, but
 they haven't been tested.
 
-Please note that this package is part of [Extra Packages for Enterprise
-Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort
-to create and maintain additional packages for the RHEL distribution.
-
-Also note that due to the current Docker limitations, Docker is able to
+Please note that due to the current Docker limitations, Docker is able to
 run only on the **64 bit** architecture.
 
 To run Docker, you will need [CentOS6](http://www.centos.org) or higher,
 with a kernel version 2.6.32-431 or higher as this has specific kernel
 fixes to allow Docker to run.
 
-## Installation
+## Installing Docker - CentOS-7
+Docker is included by default in the CentOS-Extras repository. To install it,
+simply run the following command:
+
+    $ sudo yum install docker
+
+## Installing Docker - CentOS-6
+Please note that for CentOS-6, this package is part of [Extra Packages
+for Enterprise Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort
+to create and maintain additional packages for the RHEL distribution.
 
 Firstly, you need to ensure you have the EPEL repository enabled. Please
 follow the [EPEL installation instructions](
@@ -39,7 +47,9 @@
 
     $ sudo yum install docker-io
 
-Now that it's installed, let's start the Docker daemon.
+## Using Docker
+
+Once Docker is installed, you will need to start the docker daemon.
 
     $ sudo service docker start
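+
+If you also want the daemon to start automatically at boot (an extra step not
+in the original instructions; this assumes the SysV-style init of CentOS-6,
+while CentOS-7 users would run `sudo systemctl enable docker` instead):
+
+    $ sudo chkconfig docker on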
 
@@ -50,7 +60,7 @@
 Now let's verify that Docker is working. First we'll need to get the latest
 `centos` image.
 
-    $ sudo docker pull centos:latest
+    $ sudo docker pull centos
 
 Next we'll make sure that we can see the image by running:
 
@@ -69,6 +79,12 @@
 If everything is working properly, you'll get a simple bash prompt. Type
 exit to continue.
 
+## Dockerfiles
+The CentOS Project provides a number of sample Dockerfiles which you may use
+either as templates or to familiarize yourself with Docker. These templates
+are available on GitHub at [https://github.com/CentOS/CentOS-Dockerfiles](
+https://github.com/CentOS/CentOS-Dockerfiles).
+
 **Done!** You can either continue with the [Docker User
 Guide](/userguide/) or explore and build on the images yourself.
 
diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md
index 0ad54b4..0da2f2f 100644
--- a/docs/sources/installation/debian.md
+++ b/docs/sources/installation/debian.md
@@ -23,8 +23,6 @@
 
     $ sudo apt-get update
     $ sudo apt-get install docker.io
-    $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker
-    $ sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io
 
 To verify that everything has worked as expected:
 
diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md
index a230aa6..757b3e9 100644
--- a/docs/sources/installation/fedora.md
+++ b/docs/sources/installation/fedora.md
@@ -68,7 +68,7 @@
 If you are behind a HTTP proxy server, for example in corporate settings, 
 you will need to add this configuration in the Docker *systemd service file*.
 
-Edit file `/lib/systemd/system/docker.service`. Add the following to
+Edit file `/usr/lib/systemd/system/docker.service`. Add the following to
 section `[Service]` :
 
     Environment="HTTP_PROXY=http://proxy.example.com:80/"
diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md
index 62fdc9f..ac92ad3 100644
--- a/docs/sources/installation/gentoolinux.md
+++ b/docs/sources/installation/gentoolinux.md
@@ -39,6 +39,9 @@
 https://github.com/tianon/docker-overlay/issues) or ping
 tianon directly in the #docker IRC channel on the freenode network.
 
+Other use flags are described in detail on [tianon's
+blog](https://tianon.github.io/post/2014/05/17/docker-on-gentoo.html).
+
 ## Starting Docker
 
 Ensure that you are running a kernel that includes all the necessary
diff --git a/docs/sources/installation/google.md b/docs/sources/installation/google.md
index b6c1b3d..23a9bfb 100644
--- a/docs/sources/installation/google.md
+++ b/docs/sources/installation/google.md
@@ -12,7 +12,7 @@
 2. Download and configure the [Google Cloud SDK][3] to use your
    project with the following commands:
 
-        $ curl https://sdk.cloud.google.com | bash
+        $ curl -sSL https://sdk.cloud.google.com | bash
         $ gcloud auth login
         $ gcloud config set project <google-cloud-project-id>
 
@@ -20,15 +20,18 @@
    (select a zone close to you and the desired instance size)
 
         $ gcloud compute instances create docker-playground \
-          --image https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140522 \
+          --image container-vm-v20140730 \
+          --image-project google-containers \
           --zone us-central1-a \
           --machine-type f1-micro
 
 4. Connect to the instance using SSH:
 
         $ gcloud compute ssh --zone us-central1-a docker-playground
-        docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/'
-        docker on GCE \o/
+        docker-playground:~$ sudo docker run hello-world
+        Hello from Docker.
+        This message shows that your installation appears to be working correctly.
+        ...
 
 Read more about [deploying Containers on Google Cloud Platform][5].
 
diff --git a/docs/sources/installation/images/osx-Boot2Docker-Start-app.png b/docs/sources/installation/images/osx-Boot2Docker-Start-app.png
deleted file mode 100644
index 21e2b77..0000000
--- a/docs/sources/installation/images/osx-Boot2Docker-Start-app.png
+++ /dev/null
Binary files differ
diff --git a/docs/sources/installation/images/osx-installer.png b/docs/sources/installation/images/osx-installer.png
index 635ac35..dbb6bcd 100644
--- a/docs/sources/installation/images/osx-installer.png
+++ b/docs/sources/installation/images/osx-installer.png
Binary files differ
diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md
index 2aff0e5..da0e172 100644
--- a/docs/sources/installation/mac.md
+++ b/docs/sources/installation/mac.md
@@ -7,13 +7,13 @@
 > **Note:**
 > Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer.
 
-The Docker Engine uses Linux-specific kernel features, so to run it on OS X
-we need to use a lightweight virtual machine (vm).  You use the OS X Docker client to
+Because the Docker Engine uses Linux-specific kernel features, you'll need to use a
+lightweight virtual machine (VM) to run it on OS X. You use the OS X Docker client to
 control the virtualized Docker Engine to build, run, and manage Docker containers.
 
-To make this process easier, we've designed a helper application called
-[Boot2Docker](https://github.com/boot2docker/boot2docker) that installs the
-virtual machine and runs the Docker daemon.
+To make this process easier, we've built a helper application called
+[Boot2Docker](https://github.com/boot2docker/boot2docker) that installs a
+virtual machine (using VirtualBox) that's all set up to run the Docker daemon.
 
 ## Demonstration
 
@@ -22,50 +22,67 @@
 ## Installation
 
 1. Download the latest release of the [Docker for OS X Installer](
-   https://github.com/boot2docker/osx-installer/releases)
+   https://github.com/boot2docker/osx-installer/releases) (Look for the green
+   Boot2Docker-x.x.x.pkg button near the bottom of the page.)
 
-2. Run the installer, which will install VirtualBox and the Boot2Docker management
-   tool.
+2. Run the installer by double-clicking the downloaded package, which will install a
+VirtualBox VM, Docker itself, and the Boot2Docker management tool.
    ![](/installation/images/osx-installer.png)
 
-3. Run the `Boot2Docker` app in the `Applications` folder:
-   ![](/installation/images/osx-Boot2Docker-Start-app.png)
-
-   Or, to initialize Boot2Docker manually, open a terminal and run:
+3. Locate the `Boot2Docker` app in your `Applications` folder and run it.
+   Or, you can initialize Boot2Docker from the command line by running:
 
 	     $ boot2docker init
 	     $ boot2docker start
 	     $ export DOCKER_HOST=tcp://$(boot2docker ip 2>/dev/null):2375
 
+A terminal window will open and you'll see the virtual machine starting up. 
 Once you have an initialized virtual machine, you can control it with `boot2docker stop`
 and `boot2docker start`.
 
+> **Note:**
+> If you see a message in the terminal that looks something like this:
+>
+> `To connect the Docker client to the Docker daemon, please set:
+> export DOCKER_HOST=tcp://192.168.59.103:2375`
+>
+> you can safely set the environment variable as instructed.
+
+View the
+[Boot2Docker ReadMe](https://github.com/boot2docker/boot2docker/blob/master/README.md)
+for more information.
+
 ## Upgrading
 
 1. Download the latest release of the [Docker for OS X Installer](
    https://github.com/boot2docker/osx-installer/releases)
 
-2. Run the installer, which will update VirtualBox and the Boot2Docker management
-   tool.
+2. If Boot2Docker is currently running, stop it with `boot2docker stop`. Then, run
+the installer package, which will update Docker and the Boot2Docker management tool.
 
-3. To upgrade your existing virtual machine, open a terminal and run:
+3. To complete the upgrade, you also need to update your existing virtual machine. Open a
+terminal window and run:
 
         $ boot2docker stop
         $ boot2docker download
         $ boot2docker start
 
+This will download an .iso containing a fresh VM and start it up.
+
 ## Running Docker
 
-From your terminal, you can test that Docker is running with a “hello world” example.
-Start the vm and then run:
+From your terminal, you can test that Docker is running with our small
+`hello-world` example image. Start the VM (`boot2docker start`) and then run:
 
-    $ docker run ubuntu echo hello world
+    $ docker run hello-world
 
-This should download the `ubuntu` image and print `hello world`.
+This should download the `hello-world` image, which then creates a small
+container with an executable that prints a brief `Hello from Docker.` message.
 
 ## Container port redirection
 
-The latest version of `boot2docker` sets up a host only network adaptor which provides
+The latest version of `boot2docker` sets up a host-only network adaptor which provides
 access to the container's ports.
 
 If you run a container with an exposed port,
@@ -76,14 +93,16 @@
 
     $ boot2docker ip
 
-Typically, it is 192.168.59.103, but it could get changed by Virtualbox's DHCP
-implementation.
+Typically, it is 192.168.59.103, but VirtualBox's DHCP implementation might change
+this address in the future.
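+
+As an illustrative sketch (the `nginx` image and the port numbers here are
+arbitrary choices, not from the original text), you could publish a container
+port and then reach it through the VM's address:
+
+    $ docker run -d -p 8080:80 nginx
+    $ curl http://$(boot2docker ip 2>/dev/null):8080/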
 
 # Further details
 
-If you are curious, the username for the boot2docker default user is `docker` and the password is `tcuser`.
+If you are curious, the username for the boot2docker default user is `docker` and the
+password is `tcuser`.
 
-The Boot2Docker management tool provides several commands:
+The Boot2Docker management tool provides several additional commands for working with the
+VM and Docker:
 
     $ ./boot2docker
     Usage: ./boot2docker [<options>]
diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/openSUSE.md
index c03c74a..951b877 100644
--- a/docs/sources/installation/openSUSE.md
+++ b/docs/sources/installation/openSUSE.md
@@ -47,13 +47,27 @@
 root user, need to be part of this group in order to interact with the
 Docker daemon. You can add users with:
 
-    $ sudo usermod -a -G docker <username>
+    $ sudo /usr/sbin/usermod -a -G docker <username>
 
 To verify that everything has worked as expected:
 
-    $ sudo docker run --rm -i -t ubuntu /bin/bash
+    $ sudo docker run --rm -i -t opensuse /bin/bash
 
-This should download and import the `ubuntu` image, and then start `bash` in a container. To exit the container type `exit`.
+This should download and import the `opensuse` image, and then start `bash` in
+a container. To exit the container type `exit`.
+
+If you want your containers to be able to access the external network you must
+enable the `net.ipv4.ip_forward` rule.
+This can be done using YaST by browsing to the
+`Network Devices -> Network Settings -> Routing` menu and ensuring that the
+`Enable IPv4 Forwarding` box is checked.
+
+This option cannot be changed when networking is handled by the Network Manager.
+In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by
+hand to ensure the `FW_ROUTE` flag is set to `yes` like so:
+
+    FW_ROUTE="yes"
+
 
 **Done!**
 
diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md
index c144573..a8b785a 100644
--- a/docs/sources/installation/rhel.md
+++ b/docs/sources/installation/rhel.md
@@ -2,15 +2,35 @@
 page_description: Installation instructions for Docker on Red Hat Enterprise Linux.
 page_keywords: Docker, Docker documentation, requirements, linux, rhel, centos
 
-# Red Hat Enterprise Linux
+# Red Hat Enterprise Linux 7
 
-Docker is available for **RHEL** on EPEL. These instructions should work
-for both RHEL and CentOS. They will likely work for other binary
-compatible EL6 distributions as well, but they haven't been tested.
+**Red Hat Enterprise Linux 7** has [shipped with
+Docker](https://access.redhat.com/site/products/red-hat-enterprise-linux/docker-and-containers).
+An overview and some guidance can be found in the [Release
+Notes](https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/7.0_Release_Notes/chap-Red_Hat_Enterprise_Linux-7.0_Release_Notes-Linux_Containers_with_Docker_Format.html).
 
-Please note that this package is part of [Extra Packages for Enterprise
-Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort
-to create and maintain additional packages for the RHEL distribution.
+Docker is located in the *extras* channel. To install Docker:
+
+1. Enable the *extras* channel:
+
+        $ sudo subscription-manager repos --enable=rhel-7-server-extras-rpms
+
+2. Install Docker:
+
+        $ sudo yum install docker 
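+
+3. Optionally, verify the installation (an extra step, not in the original
+   list, mirroring the `hello-world` check used elsewhere in these docs):
+
+        $ sudo systemctl start docker
+        $ sudo docker run hello-world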
+
+Additional installation, configuration, and usage information,
+including a [Get Started with Docker Containers in Red Hat
+Enterprise Linux 7](https://access.redhat.com/site/articles/881893)
+guide, can be found by Red Hat customers on the [Red Hat Customer
+Portal](https://access.redhat.com/).
+
+# Red Hat Enterprise Linux 6
+
+Docker is available for **RHEL** on EPEL. Please note that
+this package is part of [Extra Packages for Enterprise Linux
+(EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort to
+create and maintain additional packages for the RHEL distribution.
 
 Also note that due to the current Docker limitations, Docker is able to
 run only on the **64 bit** architecture.
diff --git a/docs/sources/installation/softlayer.md b/docs/sources/installation/softlayer.md
index d018667..d594896 100644
--- a/docs/sources/installation/softlayer.md
+++ b/docs/sources/installation/softlayer.md
@@ -6,22 +6,22 @@
 
 1. Create an [IBM SoftLayer account](
    https://www.softlayer.com/cloud-servers/).
-2. Log in to the [SoftLayer Console](
-   https://control.softlayer.com/devices/).
-3. Go to [Order Hourly Computing Instance Wizard](
-   https://manage.softlayer.com/Sales/orderHourlyComputingInstance)
-   on your SoftLayer Console.
-4. Create a new *CloudLayer Computing Instance* (CCI) using the default
+2. Log in to the [SoftLayer Customer Portal](
+   https://control.softlayer.com/).
+3. From the *Devices* menu select [*Device List*](https://control.softlayer.com/devices).
+4. Click *Order Devices* on the top right of the window below the menu bar.
+5. Under *Virtual Server* click [*Hourly*](https://manage.softlayer.com/Sales/orderHourlyComputingInstance)
+6. Create a new *SoftLayer Virtual Server Instance* (VSI) using the default
    values for all the fields and choose:
 
-    - *First Available* as `Datacenter` and
+    - The desired location for *Datacenter*
     - *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)*
-      as `Operating System`.
+      for *Operating System*.
 
-5. Click the *Continue Your Order* button at the bottom right and
-   select *Go to checkout*.
-6. Insert the required *User Metadata* and place the order.
-7. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux)
+7. Click the *Continue Your Order* button at the bottom right.
+8. Fill out VSI *hostname* and *domain*.
+9. Insert the required *User Metadata* and place the order.
+10. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux)
    instructions.
 
 ## What next?
diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md
index 5d1b6c3..673ea18 100644
--- a/docs/sources/installation/ubuntulinux.md
+++ b/docs/sources/installation/ubuntulinux.md
@@ -63,7 +63,7 @@
 >
 > There is also a simple `curl` script available to help with this process.
 >
->     $ curl -s https://get.docker.io/ubuntu/ | sudo sh
+>     $ curl -sSL https://get.docker.io/ubuntu/ | sudo sh
 
 To verify that everything has worked as expected:
 
@@ -134,7 +134,7 @@
 > 
 > There is also a simple `curl` script available to help with this process.
 > 
->     $ curl -s https://get.docker.io/ubuntu/ | sudo sh
+>     $ curl -sSL https://get.docker.io/ubuntu/ | sudo sh
 
 Now verify that the installation has worked by downloading the
 `ubuntu` image and launching a container.
@@ -266,11 +266,11 @@
 
 ## Troubleshooting
 
-On Linux Mint, the `cgroup-lite` package is not
+On Linux Mint, the `cgroup-lite` and `apparmor` packages are not
 installed by default. Before Docker will work correctly, you will need
 to install them via:
 
-    $ sudo apt-get update && sudo apt-get install cgroup-lite
+    $ sudo apt-get update && sudo apt-get install cgroup-lite apparmor
 
 ## Docker and UFW
 
diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md
index 9908c05..6220cd6 100644
--- a/docs/sources/installation/windows.md
+++ b/docs/sources/installation/windows.md
@@ -51,11 +51,11 @@
 
 Boot2Docker will log you in automatically so you can start using Docker right away.
 
-Let's try the “hello world” example. Run
+Let's try the `hello-world` example image. Run
 
-    $ docker run busybox echo hello world
+    $ docker run hello-world
 
-This will download the small busybox image and print "hello world".
+This should download the very small `hello-world` image and print a `Hello from Docker.` message.
 
 
 # Further Details
diff --git a/docs/sources/introduction/understanding-docker.md b/docs/sources/introduction/understanding-docker.md
index c79573a..9448f68 100644
--- a/docs/sources/introduction/understanding-docker.md
+++ b/docs/sources/introduction/understanding-docker.md
@@ -55,7 +55,7 @@
 workloads easy. You can use Docker to quickly scale up or tear down applications
 and services. Docker's speed means that scaling can be near real time.
 
-*Achieving higher density and running more workloads**
+*Achieving higher density and running more workloads*
 
 Docker is lightweight and fast. It provides a viable, cost-effective alternative
 to hypervisor-based virtual machines. This is especially useful in high density
@@ -79,7 +79,7 @@
 Docker *daemon*, which does the heavy lifting of building, running, and
 distributing your Docker containers. Both the Docker client and the daemon *can*
 run on the same system, or you can connect a Docker client to a remote Docker
-daemon. The Docker client and service communicate via sockets or through a
+daemon. The Docker client and daemon communicate via sockets or through a
 RESTful API.
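+
+For instance (a quick sketch using `nc`, not part of the original text), you
+can speak to the daemon's REST API directly over its default Unix socket:
+
+    $ echo -e "GET /info HTTP/1.0\r\n" | sudo nc -U /var/run/docker.sock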
 
 ![Docker Architecture Diagram](/article-img/architecture.svg)
@@ -157,7 +157,7 @@
 
 > **Note:** Docker usually gets these base images from
 > [Docker Hub](https://hub.docker.com).
-> 
+
 Docker images are then built from these base images using a simple, descriptive
 set of steps we call *instructions*. Each instruction creates a new layer in our
 image. Instructions include actions like:
diff --git a/docs/sources/reference/api.md b/docs/sources/reference/api.md
deleted file mode 100644
index b617211..0000000
--- a/docs/sources/reference/api.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# APIs
-
-Your programs and scripts can access Docker's functionality via these
-interfaces:
-
- - [Registry & Index Spec](registry_index_spec/)
-    - [1. The 3 roles](registry_index_spec/#the-3-roles)
-        - [1.1 Index](registry_index_spec/#index)
-        - [1.2 Registry](registry_index_spec/#registry)
-        - [1.3 Docker](registry_index_spec/#docker)
-
-    - [2. Workflow](registry_index_spec/#workflow)
-        - [2.1 Pull](registry_index_spec/#pull)
-        - [2.2 Push](registry_index_spec/#push)
-        - [2.3 Delete](registry_index_spec/#delete)
-
-    - [3. How to use the Registry in standalone mode](registry_index_spec/#how-to-use-the-registry-in-standalone-mode)
-        - [3.1 Without an Index](registry_index_spec/#without-an-index)
-        - [3.2 With an Index](registry_index_spec/#with-an-index)
-
-    - [4. The API](registry_index_spec/#the-api)
-        - [4.1 Images](registry_index_spec/#images)
-        - [4.2 Users](registry_index_spec/#users)
-        - [4.3 Tags (Registry)](registry_index_spec/#tags-registry)
-        - [4.4 Images (Index)](registry_index_spec/#images-index)
-        - [4.5 Repositories](registry_index_spec/#repositories)
-
-    - [5. Chaining Registries](registry_index_spec/#chaining-registries)
-    - [6. Authentication & Authorization](registry_index_spec/#authentication-authorization)
-        - [6.1 On the Index](registry_index_spec/#on-the-index)
-        - [6.2 On the Registry](registry_index_spec/#on-the-registry)
-
-    - [7 Document Version](registry_index_spec/#document-version)
-
- - [Docker Registry API](registry_api/)
-    - [1. Brief introduction](registry_api/#brief-introduction)
-    - [2. Endpoints](registry_api/#endpoints)
-        - [2.1 Images](registry_api/#images)
-        - [2.2 Tags](registry_api/#tags)
-        - [2.3 Repositories](registry_api/#repositories)
-        - [2.4 Status](registry_api/#status)
-
-    - [3 Authorization](registry_api/#authorization)
-
- - [Docker Hub API](index_api/)
-    - [1. Brief introduction](index_api/#brief-introduction)
-    - [2. Endpoints](index_api/#endpoints)
-        - [2.1 Repository](index_api/#repository)
-        - [2.2 Users](index_api/#users)
-        - [2.3 Search](index_api/#search)
-
- - [Docker Remote API](docker_remote_api/)
-    - [1. Brief introduction](docker_remote_api/#brief-introduction)
-    - [2. Versions](docker_remote_api/#versions)
-        - [v1.12](docker_remote_api/#v1-12)
-        - [v1.11](docker_remote_api/#v1-11)
-        - [v1.10](docker_remote_api/#v1-10)
-        - [v1.9](docker_remote_api/#v1-9)
-        - [v1.8](docker_remote_api/#v1-8)
-        - [v1.7](docker_remote_api/#v1-7)
-        - [v1.6](docker_remote_api/#v1-6)
-        - [v1.5](docker_remote_api/#v1-5)
-        - [v1.4](docker_remote_api/#v1-4)
-        - [v1.3](docker_remote_api/#v1-3)
-        - [v1.2](docker_remote_api/#v1-2)
-        - [v1.1](docker_remote_api/#v1-1)
-        - [v1.0](docker_remote_api/#v1-0)
-
- - [Docker Remote API Client Libraries](remote_api_client_libraries/)
- - [docker.io OAuth API](docker_io_oauth_api/)
-    - [1. Brief introduction](docker_io_oauth_api/#brief-introduction)
-    - [2. Register Your Application](docker_io_oauth_api/#register-your-application)
-    - [3. Endpoints](docker_io_oauth_api/#endpoints)
-        - [3.1 Get an Authorization Code](docker_io_oauth_api/#get-an-authorization-code)
-        - [3.2 Get an Access Token](docker_io_oauth_api/#get-an-access-token)
-        - [3.3 Refresh a Token](docker_io_oauth_api/#refresh-a-token)
-
-    - [4. Use an Access Token with the API](docker_io_oauth_api/#use-an-access-token-with-the-api)
-
- - [docker.io Accounts API](docker_io_accounts_api/)
-    - [1. Endpoints](docker_io_accounts_api/#endpoints)
-        - [1.1 Get a single user](docker_io_accounts_api/#get-a-single-user)
-        - [1.2 Update a single user](docker_io_accounts_api/#update-a-single-user)
-        - [1.3 List email addresses for a user](docker_io_accounts_api/#list-email-addresses-for-a-user)
-        - [1.4 Add email address for a user](docker_io_accounts_api/#add-email-address-for-a-user)
-        - [1.5 Update an email address for a user](docker_io_accounts_api/#update-an-email-address-for-a-user)
-        - [1.6 Delete email address for a user](docker_io_accounts_api/#delete-email-address-for-a-user)
diff --git a/docs/sources/reference/api/docker-io_api.md b/docs/sources/reference/api/docker-io_api.md
index d5be332..e34e43f 100644
--- a/docs/sources/reference/api/docker-io_api.md
+++ b/docs/sources/reference/api/docker-io_api.md
@@ -421,7 +421,7 @@
         Accept: application/json
         Content-Type: application/json
 
-        {"email": "sam@dotcloud.com",
+        {"email": "sam@docker.com",
          "password": "toto42",
          "username": "foobar"}
 
@@ -468,7 +468,7 @@
         Content-Type: application/json
         Authorization: Basic akmklmasadalkm==
 
-        {"email": "sam@dotcloud.com",
+        {"email": "sam@docker.com",
          "password": "toto42"}
 
     Parameters:
diff --git a/docs/sources/reference/api/docker_io_oauth_api.md b/docs/sources/reference/api/docker_io_oauth_api.md
deleted file mode 100644
index c5d0772..0000000
--- a/docs/sources/reference/api/docker_io_oauth_api.md
+++ /dev/null
@@ -1,254 +0,0 @@
-page_title: docker.io OAuth API
-page_description: API Documentation for docker.io's OAuth flow.
-page_keywords: API, Docker, oauth, REST, documentation
-
-# docker.io OAuth API
-
-## 1. Brief introduction
-
-Some docker.io API requests will require an access token to
-authenticate. To get an access token for a user, that user must first
-grant your application access to their docker.io account. In order for
-them to grant your application access you must first register your
-application.
-
-Before continuing, we encourage you to familiarize yourself with [The
-OAuth 2.0 Authorization Framework](http://tools.ietf.org/html/rfc6749).
-
-*Also note that all OAuth interactions must take place over https
-connections*
-
-## 2. Register Your Application
-
-You will need to register your application with docker.io before users
-will be able to grant your application access to their account
-information. We are currently only allowing applications selectively. To
-request registration of your application send an email to
-[support-accounts@docker.com](mailto:support-accounts%40docker.com) with
-the following information:
-
- - The name of your application
- - A description of your application and the service it will provide to
-   docker.io users.
- - A callback URI that we will use for redirecting authorization
-   requests to your application. These are used in the step of getting
-   an Authorization Code. The domain name of the callback URI will be
-   visible to the user when they are requested to authorize your
-   application.
-
-When your application is approved you will receive a response from the
-docker.io team with your `client_id` and
-`client_secret` which your application will use in
-the steps of getting an Authorization Code and getting an Access Token.
-
-# 3. Endpoints
-
-## 3.1 Get an Authorization Code
-
-Once You have registered you are ready to start integrating docker.io
-accounts into your application! The process is usually started by a user
-following a link in your application to an OAuth Authorization endpoint.
-
-`GET /api/v1.1/o/authorize/`
-
-Request that a docker.io user authorize your application. If the
-user is not already logged in, they will be prompted to login. The
-user is then presented with a form to authorize your application for
-the requested access scope. On submission, the user will be
-redirected to the specified `redirect_uri` with
-an Authorization Code.
-
-    Query Parameters:
-
-     
-
-    -   **client_id** – The `client_id` given to
-        your application at registration.
-    -   **response_type** – MUST be set to `code`.
-        This specifies that you would like an Authorization Code
-        returned.
-    -   **redirect_uri** – The URI to redirect back to after the user
-        has authorized your application. If omitted, the first of your
-        registered `response_uris` is used. If
-        included, it must be one of the URIs which were submitted when
-        registering your application.
-    -   **scope** – The extent of access permissions you are requesting.
-        Currently, the scope options are `profile_read`, `profile_write`,
-        `email_read`, and `email_write`. Scopes must be separated by a space. If omitted, the
-        default scopes `profile_read email_read` are
-        used.
-    -   **state** – (Recommended) Used by your application to maintain
-        state between the authorization request and callback to protect
-        against CSRF attacks.
-
-    **Example Request**
-
-    Asking the user for authorization.
-
-        GET /api/v1.1/o/authorize/?client_id=TestClientID&response_type=code&redirect_uri=https%3A//my.app/auth_complete/&scope=profile_read%20email_read&state=abc123 HTTP/1.1
-        Host: www.docker.io
-
-    **Authorization Page**
-
-    When the user follows a link, making the above GET request, they
-    will be asked to login to their docker.io account if they are not
-    already and then be presented with the following authorization
-    prompt which asks the user to authorize your application with a
-    description of the requested scopes.
-
-    ![](/reference/api/_static/io_oauth_authorization_page.png)
-
-    Once the user allows or denies your Authorization Request the user
-    will be redirected back to your application. Included in that
-    request will be the following query parameters:
-
-    `code`
-    :   The Authorization code generated by the docker.io authorization
-        server. Present it again to request an Access Token. This code
-        expires in 60 seconds.
-    `state`
-    :   If the `state` parameter was present in the
-        authorization request this will be the exact value received from
-        that request.
-    `error`
-    :   An error message in the event of the user denying the
-        authorization or some other kind of error with the request.
-
-## 3.2 Get an Access Token
-
-Once the user has authorized your application, a request will be made to
-your application's specified `redirect_uri` which
-includes a `code` parameter that you must then use
-to get an Access Token.
-
-`POST /api/v1.1/o/token/`
-
-Submit your newly granted Authorization Code and your application's
-credentials to receive an Access Token and Refresh Token. The code
-is valid for 60 seconds and cannot be used more than once.
-
-    Request Headers:
-
-     
-
-    -   **Authorization** – HTTP basic authentication using your
-        application's `client_id` and
-        `client_secret`
-
-    Form Parameters:
-
-     
-
-    -   **grant_type** – MUST be set to `authorization_code`
-    -   **code** – The authorization code received from the user's
-        redirect request.
-    -   **redirect_uri** – The same `redirect_uri`
-        used in the authentication request.
-
-    **Example Request**
-
-    Using an authorization code to get an access token.
-
-        POST /api/v1.1/o/token/ HTTP/1.1
-        Host: www.docker.io
-        Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ=
-        Accept: application/json
-        Content-Type: application/json
-
-        {
-            "grant_type": "code",
-            "code": "YXV0aG9yaXphdGlvbl9jb2Rl",
-            "redirect_uri": "https://my.app/auth_complete/"
-        }
-
-    **Example Response**
-
-        HTTP/1.1 200 OK
-        Content-Type: application/json;charset=UTF-8
-
-        {
-            "username": "janedoe",
-            "user_id": 42,
-            "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS",
-            "expires_in": 15552000,
-            "token_type": "Bearer",
-            "scope": "profile_read email_read",
-            "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
-        }
-
-    In the case of an error, there will be a non-200 HTTP Status and and
-    data detailing the error.
-
-## 3.3 Refresh a Token
-
-Once the Access Token expires you can use your `refresh_token`
-to have docker.io issue your application a new Access Token,
-if the user has not revoked access from your application.
-
-`POST /api/v1.1/o/token/`
-
-Submit your `refresh_token` and application's
-credentials to receive a new Access Token and Refresh Token. The
-`refresh_token` can be used only once.
-
-    Request Headers:
-
-     
-
-    -   **Authorization** – HTTP basic authentication using your
-        application's `client_id` and
-        `client_secret`
-
-    Form Parameters:
-
-     
-
-    -   **grant_type** – MUST be set to `refresh_token`
-    -   **refresh_token** – The `refresh_token`
-        which was issued to your application.
-    -   **scope** – (optional) The scope of the access token to be
-        returned. Must not include any scope not originally granted by
-        the user and if omitted is treated as equal to the scope
-        originally granted.
-
-    **Example Request**
-
-    Refreshing an access token.
-
-        POST /api/v1.1/o/token/ HTTP/1.1
-        Host: www.docker.io
-        Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ=
-        Accept: application/json
-        Content-Type: application/json
-
-        {
-            "grant_type": "refresh_token",
-            "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc",
-        }
-
-    **Example Response**
-
-        HTTP/1.1 200 OK
-        Content-Type: application/json;charset=UTF-8
-
-        {
-            "username": "janedoe",
-            "user_id": 42,
-            "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS",
-            "expires_in": 15552000,
-            "token_type": "Bearer",
-            "scope": "profile_read email_read",
-            "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
-        }
-
-    In the case of an error, there will be a non-200 HTTP Status and and
-    data detailing the error.
-
-# 4. Use an Access Token with the API
-
-Many of the docker.io API requests will require a Authorization request
-header field. Simply ensure you add this header with "Bearer <`access_token`>":
-
-    GET /api/v1.1/resource HTTP/1.1
-    Host: docker.io
-    Authorization: Bearer 2YotnFZFEjr1zCsicMWpAA
diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md
index 36f3538..e712f86 100644
--- a/docs/sources/reference/api/docker_remote_api.md
+++ b/docs/sources/reference/api/docker_remote_api.md
@@ -18,13 +18,39 @@
    encoded (JSON) string with credentials:
    `{'username': string, 'password': string, 'email': string, 'serveraddress' : string}`
 
-The current version of the API is v1.13
+The current version of the API is v1.14
 
-Calling `/images/<name>/insert` is the same as calling
-`/v1.13/images/<name>/insert`.
+Calling `/info` is the same as calling
+`/v1.14/info`.
 
 You can still call an old version of the API using
-`/v1.12/images/<name>/insert`.
+`/v1.13/info`.
+
+## v1.14
+
+### Full Documentation
+
+[*Docker Remote API v1.14*](/reference/api/docker_remote_api_v1.14/)
+
+### What's new
+
+`DELETE /containers/(id)`
+
+**New!**
+When using `force`, the container will be immediately killed with SIGKILL.
+
+`POST /containers/(id)/start`
+
+**New!**
+The `hostConfig` option now accepts the field `CapAdd`, which specifies a list of capabilities
+to add, and the field `CapDrop`, which specifies a list of capabilities to drop.
+
+`POST /images/create`
+
+**New!**
+The `fromImage` and `repo` parameters now support the `repo:tag` format.
+Consequently, the `tag` parameter is now obsolete. Using the new format and
+the `tag` parameter at the same time will return an error.
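+
+For example, pulling with the new format might look like this (a minimal Go
+sketch; the TCP daemon address is an assumption):
+
+    package main
+
+    import "net/http"
+
+    func main() {
+        // v1.14: the tag rides along in fromImage; do not also pass "tag".
+        resp, err := http.Post(
+            "http://localhost:2375/v1.14/images/create?fromImage=base:latest",
+            "text/plain", nil)
+        if err != nil {
+            panic(err)
+        }
+        resp.Body.Close()
+    }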
 
 ## v1.13
 
@@ -100,7 +126,7 @@
 
 `GET /containers/(id)/logs`
 
-This url is prefered method for getting container logs now.
+This URL is now the preferred method for getting container logs.
 
 ## v1.10
 
@@ -284,7 +310,7 @@
 
 **New!**
 You can now split stderr from stdout. This is done by
-prefixing a header to each transmition. See
+prefixing a header to each transmission. See
 [`POST /containers/(id)/attach`](
 /reference/api/docker_remote_api_v1.9/#post--containers-(id)-attach "POST /containers/(id)/attach").
 The WebSocket attach is unchanged. Note that attach calls on the
@@ -344,7 +370,7 @@
 ## v1.3
 
 docker v0.5.0
-[51f6c4a](https://github.com/dotcloud/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909)
+[51f6c4a](https://github.com/docker/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909)
 
 ### Full Documentation
 
@@ -384,7 +410,7 @@
 ## v1.2
 
 docker v0.4.2
-[2e7649b](https://github.com/dotcloud/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168)
+[2e7649b](https://github.com/docker/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168)
 
 ### Full Documentation
 
@@ -416,7 +442,7 @@
 ## v1.1
 
 docker v0.4.0
-[a8ae398](https://github.com/dotcloud/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f)
+[a8ae398](https://github.com/docker/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f)
 
 ### Full Documentation
 
@@ -443,7 +469,7 @@
 ## v1.0
 
 docker v0.3.4
-[8d73740](https://github.com/dotcloud/docker/commit/8d73740343778651c09160cde9661f5f387b36f4)
+[8d73740](https://github.com/docker/docker/commit/8d73740343778651c09160cde9661f5f387b36f4)
 
 ### Full Documentation
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.0.md b/docs/sources/reference/api/docker_remote_api_v1.0.md
index b906298..197991d 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.0.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.0.md
@@ -194,7 +194,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {}
         }
@@ -566,6 +566,13 @@
 
         {{ STREAM }}
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -670,12 +677,6 @@
 
         {{ STREAM }}
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Status Codes:
 
     -   **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.1.md b/docs/sources/reference/api/docker_remote_api_v1.1.md
index 4e449bc..928e321 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.1.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.1.md
@@ -194,7 +194,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {}
         }
@@ -573,6 +573,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -680,12 +687,6 @@
         {"error":"Invalid..."}
         ...
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Status Codes:
 
     -   **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md
index 264cdef..6ffae3e 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.10.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.10.md
@@ -220,7 +220,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {},
                      "HostConfig": {
@@ -739,6 +739,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -846,11 +853,20 @@
         {"error":"Invalid..."}
         ...
 
+    If you wish to push an image onto a private registry, that image must already have been tagged
+    into a repository which references that registry host name and port. This repository name should
+    then be used in the URL. This mirrors the flow of the CLI.
+
+    **Example request**:
+
+        POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
     Query Parameters:
 
      
 
-    -   **registry** – the registry you wan to push, optional
+    -   **tag** – the tag to associate with the image on the registry, optional
 
     Request Headers:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md
index ae2daae..a0187db 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.11.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.11.md
@@ -224,7 +224,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {},
                      "HostConfig": {
@@ -861,11 +861,20 @@
         {"error":"Invalid..."}
         ...
 
+    If you wish to push an image onto a private registry, that image must already have been tagged
+    into a repository which references that registry host name and port. This repository name should
+    then be used in the URL. This mirrors the flow of the CLI.
+
+    **Example request**:
+
+        POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
     Query Parameters:
 
      
 
-    -   **registry** – the registry you wan to push, optional
+    -   **tag** – the tag to associate with the image on the registry, optional
 
     Request Headers:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md
index 19fb24f..9ea83e2 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.12.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.12.md
@@ -90,6 +90,8 @@
         non-running ones.
     -   **size** – 1/True/true or 0/False/false, Show the containers
         sizes
+    -   **filters** – a JSON encoded value of the filters (a map[string][]string)
+        to process on the containers list.
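+
+    For illustration, one way to build this parameter (a Go sketch; the
+    daemon address and the filter key shown are assumptions):
+
+        package main
+
+        import (
+            "encoding/json"
+            "net/http"
+            "net/url"
+        )
+
+        func main() {
+            // Encode map[string][]string as JSON, then URL-escape it.
+            filters, _ := json.Marshal(map[string][]string{"exited": {"0"}})
+            u := "http://localhost:2375/v1.12/containers/json?all=1&filters=" +
+                url.QueryEscape(string(filters))
+            resp, err := http.Get(u)
+            if err != nil {
+                panic(err)
+            }
+            resp.Body.Close()
+        }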
 
     Status Codes:
 
@@ -224,7 +226,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {},
                      "HostConfig": {
@@ -759,7 +761,7 @@
      
 
     -   **all** – 1/True/true or 0/False/false, default false
-    -   **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list.
+    -   **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list.
 
 
 
@@ -808,30 +810,7 @@
     -   **200** – no error
     -   **500** – server error
 
-### Insert a file in an image
 
-`POST /images/(name)/insert`
-
-Insert a file from `url` in the image `name` at `path`
-
-    **Example request**:
-
-        POST /images/test/insert?path=/usr&url=myurl HTTP/1.1
-
-    **Example response**:
-
-        HTTP/1.1 200 OK
-        Content-Type: application/json
-
-        {"status":"Inserting..."}
-        {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
-        {"error":"Invalid..."}
-        ...
-
-    Status Codes:
-
-    -   **200** – no error
-    -   **500** – server error
 
 ### Inspect an image
 
@@ -937,11 +916,20 @@
         {"error":"Invalid..."}
         ...
 
+    If you wish to push an image onto a private registry, that image must already have been tagged
+    into a repository which references that registry host name and port. This repository name should
+    then be used in the URL. This mirrors the flow of the CLI.
+
+    **Example request**:
+
+        POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
     Query Parameters:
 
      
 
-    -   **registry** – the registry you wan to push, optional
+    -   **tag** – the tag to associate with the image on the registry, optional
 
     Request Headers:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md
index e0ad957..d782391 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.13.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.13.md
@@ -1,4 +1,4 @@
-page_title: Remote API v1.12
+page_title: Remote API v1.13
 page_description: API Documentation for Docker
 page_keywords: API, Docker, rcli, REST, documentation
 
@@ -224,7 +224,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {},
                      "HostConfig": {
@@ -808,30 +808,7 @@
     -   **200** – no error
     -   **500** – server error
 
-### Insert a file in an image
 
-`POST /images/(name)/insert`
-
-Insert a file from `url` in the image `name` at `path`
-
-    **Example request**:
-
-        POST /images/test/insert?path=/usr&url=myurl HTTP/1.1
-
-    **Example response**:
-
-        HTTP/1.1 200 OK
-        Content-Type: application/json
-
-        {"status":"Inserting..."}
-        {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
-        {"error":"Invalid..."}
-        ...
-
-    Status Codes:
-
-    -   **200** – no error
-    -   **500** – server error
 
 ### Inspect an image
 
@@ -937,11 +914,20 @@
         {"error":"Invalid..."}
         ...
 
+    If you wish to push an image onto a private registry, that image must already have been tagged
+    into a repository which references that registry host name and port. This repository name should
+    then be used in the URL. This mirrors the flow of the CLI.
+
+    **Example request**:
+
+        POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
     Query Parameters:
 
      
 
-    -   **registry** – the registry you wan to push, optional
+    -   **tag** – the tag to associate with the image on the registry, optional
 
     Request Headers:
 
@@ -1184,7 +1170,6 @@
              "NGoroutines":21,
              "NEventsListener":0,
              "InitPath":"/usr/bin/docker",
-             "Sockets":["unix:///var/run/docker.sock"],
              "IndexServerAddress":["https://index.docker.io/v1/"],
              "MemoryLimit":true,
              "SwapLimit":false,
diff --git a/docs/sources/reference/api/docker_remote_api_v1.14.md b/docs/sources/reference/api/docker_remote_api_v1.14.md
new file mode 100644
index 0000000..9a9c36e
--- /dev/null
+++ b/docs/sources/reference/api/docker_remote_api_v1.14.md
@@ -0,0 +1,1411 @@
+page_title: Remote API v1.14
+page_description: API Documentation for Docker
+page_keywords: API, Docker, rcli, REST, documentation
+
+# Docker Remote API v1.14
+
+## 1. Brief introduction
+
+ - The Remote API has replaced `rcli`.
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [*Bind Docker to another host/port or a Unix socket*](
+   /use/basics/#bind-docker).
+ - The API tends to be REST, but for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `STDOUT`,
+   `STDIN` and `STDERR`.
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+    **Example request**:
+
+        GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+             {
+                     "Id": "8dfafdbc3a40",
+                     "Image": "base:latest",
+                     "Command": "echo 1",
+                     "Created": 1367854155,
+                     "Status": "Exit 0",
+                     "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+                     "SizeRw":12288,
+                     "SizeRootFs":0
+             },
+             {
+                     "Id": "9cd87474be90",
+                     "Image": "base:latest",
+                     "Command": "echo 222222",
+                     "Created": 1367854155,
+                     "Status": "Exit 0",
+                     "Ports":[],
+                     "SizeRw":12288,
+                     "SizeRootFs":0
+             },
+             {
+                     "Id": "3176a2479c92",
+                     "Image": "base:latest",
+                     "Command": "echo 3333333333333333",
+                     "Created": 1367854154,
+                     "Status": "Exit 0",
+                     "Ports":[],
+                     "SizeRw":12288,
+                     "SizeRootFs":0
+             },
+             {
+                     "Id": "4cb07b47f9fb",
+                     "Image": "base:latest",
+                     "Command": "echo 444444444444444444444444444444444",
+                     "Created": 1367854152,
+                     "Status": "Exit 0",
+                     "Ports":[],
+                     "SizeRw":12288,
+                     "SizeRootFs":0
+             }
+        ]
+
+    Query Parameters:
+
+     
+
+    -   **all** – 1/True/true or 0/False/false, Show all containers.
+        Only running containers are shown by default
+    -   **limit** – Show `limit` last created
+        containers, include non-running ones.
+    -   **since** – Show only containers created since Id, include
+        non-running ones.
+    -   **before** – Show only containers created before Id, include
+        non-running ones.
+    -   **size** – 1/True/true or 0/False/false, Show the containers
+        sizes
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **400** – bad parameter
+    -   **500** – server error
+
+### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+    **Example request**:
+
+        POST /containers/create HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "Hostname":"",
+             "User":"",
+             "Memory":0,
+             "MemorySwap":0,
+             "AttachStdin":false,
+             "AttachStdout":true,
+             "AttachStderr":true,
+             "PortSpecs":null,
+             "Tty":false,
+             "OpenStdin":false,
+             "StdinOnce":false,
+             "Env":null,
+             "Cmd":[
+                     "date"
+             ],
+             "Image":"base",
+             "Volumes":{
+                     "/tmp": {}
+             },
+             "WorkingDir":"",
+             "DisableNetwork": false,
+             "ExposedPorts":{
+                     "22/tcp": {}
+             }
+        }
+
+    **Example response**:
+
+        HTTP/1.1 201 OK
+        Content-Type: application/json
+
+        {
+             "Id":"e90e34656806"
+             "Warnings":[]
+        }
+
+    Json Parameters:
+
+     
+
+    -   **config** – the container's configuration
+
+    Query Parameters:
+
+     
+
+    -   **name** – Assign the specified name to the container. Must
+        match `/?[a-zA-Z0-9_-]+`.
+
+    Status Codes:
+
+    -   **201** – no error
+    -   **404** – no such container
+    -   **406** – impossible to attach (container not running)
+    -   **500** – server error
+
+### Inspect a container
+
+`GET /containers/(id)/json`
+
+Return low-level information on the container `id`
+
+
+    **Example request**:
+
+        GET /containers/4fa6e0f0c678/json HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+                     "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
+                     "Created": "2013-05-07T14:51:42.041847+02:00",
+                     "Path": "date",
+                     "Args": [],
+                     "Config": {
+                             "Hostname": "4fa6e0f0c678",
+                             "User": "",
+                             "Memory": 0,
+                             "MemorySwap": 0,
+                             "AttachStdin": false,
+                             "AttachStdout": true,
+                             "AttachStderr": true,
+                             "PortSpecs": null,
+                             "Tty": false,
+                             "OpenStdin": false,
+                             "StdinOnce": false,
+                             "Env": null,
+                             "Cmd": [
+                                     "date"
+                             ],
+                             "Dns": null,
+                             "Image": "base",
+                             "Volumes": {},
+                             "VolumesFrom": "",
+                             "WorkingDir":""
+
+                     },
+                     "State": {
+                             "Running": false,
+                             "Pid": 0,
+                             "ExitCode": 0,
+                             "StartedAt": "2013-05-07T14:51:42.087658+02:01360",
+                             "Ghost": false
+                     },
+                     "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+                     "NetworkSettings": {
+                             "IpAddress": "",
+                             "IpPrefixLen": 0,
+                             "Gateway": "",
+                             "Bridge": "",
+                             "PortMapping": null
+                     },
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
+                     "ResolvConfPath": "/etc/resolv.conf",
+                     "Volumes": {},
+                     "HostConfig": {
+                         "Binds": null,
+                         "ContainerIDFile": "",
+                         "LxcConf": [],
+                         "Privileged": false,
+                         "PortBindings": {
+                            "80/tcp": [
+                                {
+                                    "HostIp": "0.0.0.0",
+                                    "HostPort": "49153"
+                                }
+                            ]
+                         },
+                         "Links": ["/name:alias"],
+                         "PublishAllPorts": false,
+                         "CapAdd: ["NET_ADMIN"],
+                         "CapDrop: ["MKNOD"]
+                     }
+        }
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### List processes running inside a container
+
+`GET /containers/(id)/top`
+
+List processes running inside the container `id`
+
+    **Example request**:
+
+        GET /containers/4fa6e0f0c678/top HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+             "Titles":[
+                     "USER",
+                     "PID",
+                     "%CPU",
+                     "%MEM",
+                     "VSZ",
+                     "RSS",
+                     "TTY",
+                     "STAT",
+                     "START",
+                     "TIME",
+                     "COMMAND"
+                     ],
+             "Processes":[
+                     ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"],
+                     ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"]
+             ]
+        }
+
+    Query Parameters:
+
+     
+
+    -   **ps_args** – ps arguments to use (e.g., aux)
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Get container logs
+
+`GET /containers/(id)/logs`
+
+Get stdout and stderr logs from the container `id`
+
+    **Example request**:
+
+       GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1
+
+    **Example response**:
+
+       HTTP/1.1 200 OK
+       Content-Type: application/vnd.docker.raw-stream
+
+       {{ STREAM }}
+
+    Query Parameters:
+
+     
+
+    -   **follow** – 1/True/true or 0/False/false, return stream. Default false
+    -   **stdout** – 1/True/true or 0/False/false, show stdout log. Default false
+    -   **stderr** – 1/True/true or 0/False/false, show stderr log. Default false
+    -   **timestamps** – 1/True/true or 0/False/false, print timestamps for
+        every log line. Default false
+    -   **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Inspect changes on a container's filesystem
+
+`GET /containers/(id)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+    **Example request**:
+
+        GET /containers/4fa6e0f0c678/changes HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+             {
+                     "Path":"/dev",
+                     "Kind":0
+             },
+             {
+                     "Path":"/dev/kmsg",
+                     "Kind":1
+             },
+             {
+                     "Path":"/test",
+                     "Kind":1
+             }
+        ]
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Export a container
+
+`GET /containers/(id)/export`
+
+Export the contents of container `id`
+
+    **Example request**:
+
+        GET /containers/4fa6e0f0c678/export HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/octet-stream
+
+        {{ STREAM }}
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Start a container
+
+`POST /containers/(id)/start`
+
+Start the container `id`
+
+    **Example request**:
+
+        POST /containers/(id)/start HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "Binds":["/tmp:/tmp"],
+             "Links":["redis3:redis"],
+             "LxcConf":{"lxc.utsname":"docker"},
+             "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
+             "PublishAllPorts":false,
+             "Privileged":false,
+             "Dns": ["8.8.8.8"],
+             "VolumesFrom": ["parent", "other:ro"],
+             "CapAdd: ["NET_ADMIN"],
+             "CapDrop: ["MKNOD"]
+        }
+
+    **Example response**:
+
+        HTTP/1.1 204 No Content
+        Content-Type: text/plain
+
+    Json Parameters:
+
+     
+
+    -   **hostConfig** – the container's host configuration (optional)
+
+    Status Codes:
+
+    -   **204** – no error
+    -   **304** – container already started
+    -   **404** – no such container
+    -   **500** – server error
+
+### Stop a container
+
+`POST /containers/(id)/stop`
+
+Stop the container `id`
+
+    **Example request**:
+
+        POST /containers/e90e34656806/stop?t=5 HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 204 OK
+
+    Query Parameters:
+
+     
+
+    -   **t** – number of seconds to wait before killing the container
+
+    Status Codes:
+
+    -   **204** – no error
+    -   **304** – container already stopped
+    -   **404** – no such container
+    -   **500** – server error
+
+### Restart a container
+
+`POST /containers/(id)/restart`
+
+Restart the container `id`
+
+    **Example request**:
+
+        POST /containers/e90e34656806/restart?t=5 HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 204 OK
+
+    Query Parameters:
+
+     
+
+    -   **t** – number of seconds to wait before killing the container
+
+    Status Codes:
+
+    -   **204** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Kill a container
+
+`POST /containers/(id)/kill`
+
+Kill the container `id`
+
+    **Example request**:
+
+        POST /containers/e90e34656806/kill HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 204 OK
+
+    Query Parameters:
+
+    -   **signal** - Signal to send to the container: integer or string like "SIGINT".
+        When not set, SIGKILL is assumed and the call will wait for the container to exit.
+
+    Status Codes:
+
+    -   **204** – no error
+    -   **404** – no such container
+    -   **500** – server error
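+
+    For illustration, a Go sketch sending SIGINT (the container id and
+    daemon address are placeholders):
+
+        package main
+
+        import "net/http"
+
+        func main() {
+            // Send SIGINT instead of the default SIGKILL.
+            resp, err := http.Post(
+                "http://localhost:2375/containers/e90e34656806/kill?signal=SIGINT",
+                "text/plain", nil)
+            if err != nil {
+                panic(err)
+            }
+            resp.Body.Close()
+        }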
+
+### Pause a container
+
+`POST /containers/(id)/pause`
+
+Pause the container `id`
+
+    **Example request**:
+
+        POST /containers/e90e34656806/pause HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 204 OK
+
+    Status Codes:
+
+    -   **204** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Unpause a container
+
+`POST /containers/(id)/unpause`
+
+Unpause the container `id`
+
+    **Example request**:
+
+        POST /containers/e90e34656806/unpause HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 204 OK
+
+    Status Codes:
+
+    -   **204** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Attach to a container
+
+`POST /containers/(id)/attach`
+
+Attach to the container `id`
+
+    **Example request**:
+
+        POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/vnd.docker.raw-stream
+
+        {{ STREAM }}
+
+    Query Parameters:
+
+     
+
+    -   **logs** – 1/True/true or 0/False/false, return logs. Default
+        false
+    -   **stream** – 1/True/true or 0/False/false, return stream.
+        Default false
+    -   **stdin** – 1/True/true or 0/False/false, if stream=true, attach
+        to stdin. Default false
+    -   **stdout** – 1/True/true or 0/False/false, if logs=true, return
+        stdout log, if stream=true, attach to stdout. Default false
+    -   **stderr** – 1/True/true or 0/False/false, if logs=true, return
+        stderr log, if stream=true, attach to stderr. Default false
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **400** – bad parameter
+    -   **404** – no such container
+    -   **500** – server error
+
+    **Stream details**:
+
+    When the TTY setting is enabled in
+    [`POST /containers/create`
+    ](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"),
+    the stream is the raw data from the process PTY and client's stdin.
+    When the TTY is disabled, the stream is multiplexed to separate
+    stdout and stderr.
+
+    The format is a **Header** and a **Payload** (frame).
+
+    **HEADER**
+
+    The header indicates which stream the payload belongs to (stdout or
+    stderr). It also contains the size of the associated frame, encoded
+    as a uint32 in the last 4 bytes.
+
+    The header occupies the first 8 bytes, laid out like this:
+
+        header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+    `STREAM_TYPE` can be:
+
+    -   0: stdin (will be written on stdout)
+    -   1: stdout
+    -   2: stderr
+
+    `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of
+    the uint32 size encoded as big endian.
+
+    **PAYLOAD**
+
+    The payload is the raw stream.
+
+    **IMPLEMENTATION**
+
+    The simplest way to implement the Attach protocol is the following:
+
+    1.  Read 8 bytes
+    2.  Choose stdout or stderr depending on the first byte
+    3.  Extract the frame size from the last 4 bytes
+    4.  Read the extracted size and output it on the correct output
+    5.  Go back to step 1
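+
+    A minimal Go sketch of that loop (illustrative only; it assumes the
+    multiplexed stream is already available as an `io.Reader`):
+
+        package attach
+
+        import (
+            "encoding/binary"
+            "io"
+        )
+
+        // Demux copies each frame of the multiplexed attach stream from r
+        // to stdout or stderr, as indicated by the first header byte.
+        func Demux(r io.Reader, stdout, stderr io.Writer) error {
+            header := make([]byte, 8)
+            for {
+                if _, err := io.ReadFull(r, header); err != nil {
+                    if err == io.EOF {
+                        return nil
+                    }
+                    return err
+                }
+                dst := stdout
+                if header[0] == 2 { // 2 = stderr; 0 and 1 go to stdout
+                    dst = stderr
+                }
+                // The frame size is a big-endian uint32 in the last 4 bytes.
+                size := int64(binary.BigEndian.Uint32(header[4:8]))
+                if _, err := io.CopyN(dst, r, size); err != nil {
+                    return err
+                }
+            }
+        }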
+
+### Wait a container
+
+`POST /containers/(id)/wait`
+
+Block until container `id` stops, then return the exit code
+
+    **Example request**:
+
+        POST /containers/16253994b7c4/wait HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"StatusCode":0}
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Remove a container
+
+`DELETE /containers/(id)`
+
+Remove the container `id` from the filesystem
+
+    **Example request**:
+
+        DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 204 OK
+
+    Query Parameters:
+
+     
+
+    -   **v** – 1/True/true or 0/False/false, Remove the volumes
+        associated with the container. Default false
+    -   **force** - 1/True/true or 0/False/false, Kill then remove the container.
+        Default false
+
+    Status Codes:
+
+    -   **204** – no error
+    -   **400** – bad parameter
+    -   **404** – no such container
+    -   **500** – server error
+
+### Copy files or folders from a container
+
+`POST /containers/(id)/copy`
+
+Copy files or folders of container `id`
+
+    **Example request**:
+
+        POST /containers/4fa6e0f0c678/copy HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "Resource":"test.txt"
+        }
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/octet-stream
+
+        {{ STREAM }}
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+## 2.2 Images
+
+### List Images
+
+`GET /images/json`
+
+    **Example request**:
+
+        GET /images/json?all=0 HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+          {
+             "RepoTags": [
+               "ubuntu:12.04",
+               "ubuntu:precise",
+               "ubuntu:latest"
+             ],
+             "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
+             "Created": 1365714795,
+             "Size": 131506275,
+             "VirtualSize": 131506275
+          },
+          {
+             "RepoTags": [
+               "ubuntu:12.10",
+               "ubuntu:quantal"
+             ],
+             "ParentId": "27cf784147099545",
+             "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+             "Created": 1364102658,
+             "Size": 24653,
+             "VirtualSize": 180116135
+          }
+        ]
+
+
+    Query Parameters:
+
+     
+
+    -   **all** – 1/True/true or 0/False/false, default false
+    -   **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list.
+
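+    For illustration, a Go sketch that applies a filter (the daemon address
+    and the `dangling` key are assumptions):
+
+        package main
+
+        import (
+            "encoding/json"
+            "net/http"
+            "net/url"
+        )
+
+        func main() {
+            // Encode map[string][]string as JSON, then URL-escape it.
+            filters, _ := json.Marshal(map[string][]string{"dangling": {"true"}})
+            resp, err := http.Get("http://localhost:2375/images/json?filters=" +
+                url.QueryEscape(string(filters)))
+            if err != nil {
+                panic(err)
+            }
+            resp.Body.Close()
+        }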
+
+
+### Create an image
+
+`POST /images/create`
+
+Create an image, either by pulling it from the registry or by importing it
+
+    **Example request**:
+
+        POST /images/create?fromImage=base HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"status":"Pulling..."}
+        {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}}
+        {"error":"Invalid..."}
+        ...
+
+    When using this endpoint to pull an image from the registry, the
+    `X-Registry-Auth` header can be used to include
+    a base64-encoded AuthConfig object.
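+
+    For illustration, a Go sketch that sets this header (field names follow
+    the AuthConfig JSON shown in this document; credentials and daemon
+    address are placeholders):
+
+        package main
+
+        import (
+            "encoding/base64"
+            "encoding/json"
+            "net/http"
+        )
+
+        func main() {
+            auth := map[string]string{
+                "username":      "hannibal",
+                "password":      "xxxx",
+                "email":         "hannibal@a-team.com",
+                "serveraddress": "https://index.docker.io/v1/",
+            }
+            b, _ := json.Marshal(auth)
+
+            req, _ := http.NewRequest("POST",
+                "http://localhost:2375/images/create?fromImage=base", nil)
+            // The header value is the base64 of the JSON-encoded AuthConfig.
+            req.Header.Set("X-Registry-Auth", base64.StdEncoding.EncodeToString(b))
+            resp, err := http.DefaultClient.Do(req)
+            if err != nil {
+                panic(err)
+            }
+            resp.Body.Close()
+        }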
+
+    Query Parameters:
+
+     
+
+    -   **fromImage** – name of the image to pull
+    -   **fromSrc** – source to import, - means stdin
+    -   **repo** – repository
+    -   **tag** – tag
+    -   **registry** – the registry to pull from
+
+    Request Headers:
+
+     
+
+    -   **X-Registry-Auth** – base64-encoded AuthConfig object
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **500** – server error
+
+
+
+### Inspect an image
+
+`GET /images/(name)/json`
+
+Return low-level information on the image `name`
+
+    **Example request**:
+
+        GET /images/base/json HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+             "Created":"2013-03-23T22:24:18.818426-07:00",
+             "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+             "ContainerConfig":
+                     {
+                             "Hostname":"",
+                             "User":"",
+                             "Memory":0,
+                             "MemorySwap":0,
+                             "AttachStdin":false,
+                             "AttachStdout":false,
+                             "AttachStderr":false,
+                             "PortSpecs":null,
+                             "Tty":true,
+                             "OpenStdin":true,
+                             "StdinOnce":false,
+                             "Env":null,
+                             "Cmd": ["/bin/bash"],
+                             "Dns":null,
+                             "Image":"base",
+                             "Volumes":null,
+                             "VolumesFrom":"",
+                             "WorkingDir":""
+                     },
+             "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+             "Parent":"27cf784147099545",
+             "Size": 6824592
+        }
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such image
+    -   **500** – server error
+
+### Get the history of an image
+
+`GET /images/(name)/history`
+
+Return the history of the image `name`
+
+    **Example request**:
+
+        GET /images/base/history HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+             {
+                     "Id":"b750fe79269d",
+                     "Created":1364102658,
+                     "CreatedBy":"/bin/bash"
+             },
+             {
+                     "Id":"27cf78414709",
+                     "Created":1364068391,
+                     "CreatedBy":""
+             }
+        ]
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such image
+    -   **500** – server error
+
+### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+    **Example request**:
+
+        POST /images/test/push HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"status":"Pushing..."}
+        {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}}
+        {"error":"Invalid..."}
+        ...
+
+    If you wish to push an image onto a private registry, that image must already have been tagged
+    into a repository which references that registry host name and port. This repository name should
+    then be used in the URL. This mirrors the flow of the CLI.
+
+    **Example request**:
+
+        POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
+    Query Parameters:
+
+     
+
+    -   **tag** – the tag to associate with the image on the registry, optional
+
+    Request Headers:
+
+     
+
+    -   **X-Registry-Auth** – include a base64-encoded AuthConfig
+        object.
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such image
+    -   **500** – server error
+
+### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+    **Example request**:
+
+        POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 201 OK
+
+    Query Parameters:
+
+     
+
+    -   **repo** – The repository to tag in
+    -   **force** – 1/True/true or 0/False/false, default false
+
+    Status Codes:
+
+    -   **201** – no error
+    -   **400** – bad parameter
+    -   **404** – no such image
+    -   **409** – conflict
+    -   **500** – server error
+
+### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+    **Example request**:
+
+        DELETE /images/test HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-type: application/json
+
+        [
+         {"Untagged":"3e2f21a89f"},
+         {"Deleted":"3e2f21a89f"},
+         {"Deleted":"53b4f83ac9"}
+        ]
+
+    Query Parameters:
+
+     
+
+    -   **force** – 1/True/true or 0/False/false, default false
+    -   **noprune** – 1/True/true or 0/False/false, default false
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **404** – no such image
+    -   **409** – conflict
+    -   **500** – server error
+
+### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
+
+    **Example request**:
+
+        GET /images/search?term=sshd HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+                {
+                    "description": "",
+                    "is_official": false,
+                    "is_automated": false,
+                    "name": "wma55/u1210sshd",
+                    "star_count": 0
+                },
+                {
+                    "description": "",
+                    "is_official": false,
+                    "is_automated": false,
+                    "name": "jdswinbank/sshd",
+                    "star_count": 0
+                },
+                {
+                    "description": "",
+                    "is_official": false,
+                    "is_automated": false,
+                    "name": "vgauthier/sshd",
+                    "star_count": 0
+                }
+        ...
+        ]
+
+    Query Parameters:
+
+     
+
+    -   **term** – term to search
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **500** – server error
+
+## 2.3 Misc
+
+### Build an image from Dockerfile via stdin
+
+`POST /build`
+
+Build an image from Dockerfile via stdin
+
+    **Example request**:
+
+        POST /build HTTP/1.1
+
+        {{ STREAM }}
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"stream":"Step 1..."}
+        {"stream":"..."}
+        {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}}
+
+    The stream must be a tar archive compressed with one of the
+    following algorithms: identity (no compression), gzip, bzip2, xz.
+
+    The archive must include a file called `Dockerfile`
+    at its root. It may include any number of other files,
+    which will be accessible in the build context (See the [*ADD build
+    command*](/reference/builder/#dockerbuilder)).
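+
+    As an illustration, a minimal Go sketch that tars a single Dockerfile
+    and posts it (the daemon address and image name are assumptions):
+
+        package main
+
+        import (
+            "archive/tar"
+            "bytes"
+            "fmt"
+            "net/http"
+        )
+
+        func main() {
+            // Build an in-memory tar archive containing only a Dockerfile.
+            dockerfile := []byte("FROM base\nCMD [\"date\"]\n")
+            buf := new(bytes.Buffer)
+            tw := tar.NewWriter(buf)
+            tw.WriteHeader(&tar.Header{
+                Name: "Dockerfile",
+                Mode: 0644,
+                Size: int64(len(dockerfile)),
+            })
+            tw.Write(dockerfile)
+            tw.Close()
+
+            // "t" names the resulting image on success.
+            resp, err := http.Post("http://localhost:2375/build?t=myrepo",
+                "application/tar", buf)
+            if err != nil {
+                panic(err)
+            }
+            defer resp.Body.Close()
+            fmt.Println(resp.Status)
+        }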
+
+    Query Parameters:
+
+     
+
+    -   **t** – repository name (and optionally a tag) to be applied to
+        the resulting image in case of success
+    -   **q** – suppress verbose build output
+    -   **nocache** – do not use the cache when building the image
+    -   **rm** - remove intermediate containers after a successful build (default behavior)
+    -   **forcerm** - always remove intermediate containers (includes rm)
+
+    Request Headers:
+
+     
+
+    -   **Content-type** – should be set to
+        `"application/tar"`.
+    -   **X-Registry-Config** – base64-encoded ConfigFile object
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **500** – server error
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+    **Example request**:
+
+        POST /auth HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "username":"hannibal",
+             "password:"xxxx",
+             "email":"hannibal@a-team.com",
+             "serveraddress":"https://index.docker.io/v1/"
+        }
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **204** – no error
+    -   **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+    **Example request**:
+
+        GET /info HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+             "Containers":11,
+             "Images":16,
+             "Driver":"btrfs",
+             "ExecutionDriver":"native-0.1",
+             "KernelVersion":"3.12.0-1-amd64"
+             "Debug":false,
+             "NFd": 11,
+             "NGoroutines":21,
+             "NEventsListener":0,
+             "InitPath":"/usr/bin/docker",
+             "IndexServerAddress":["https://index.docker.io/v1/"],
+             "MemoryLimit":true,
+             "SwapLimit":false,
+             "IPv4Forwarding":true
+        }
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+    **Example request**:
+
+        GET /version HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+             "ApiVersion":"1.12",
+             "Version":"0.2.2",
+             "GitCommit":"5a2a5cc+CHANGES",
+             "GoVersion":"go1.0.3"
+        }
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **500** – server error
+
+### Ping the docker server
+
+`GET /_ping`
+
+Ping the docker server
+
+    **Example request**:
+
+        GET /_ping HTTP/1.1
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+
+        OK
+
+    Status Codes:
+
+    -   **200** - no error
+    -   **500** - server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+    **Example request**:
+
+        POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "Hostname":"",
+             "User":"",
+             "Memory":0,
+             "MemorySwap":0,
+             "AttachStdin":false,
+             "AttachStdout":true,
+             "AttachStderr":true,
+             "PortSpecs":null,
+             "Tty":false,
+             "OpenStdin":false,
+             "StdinOnce":false,
+             "Env":null,
+             "Cmd":[
+                     "date"
+             ],
+             "Volumes":{
+                     "/tmp": {}
+             },
+             "WorkingDir":"",
+             "DisableNetwork": false,
+             "ExposedPorts":{
+                     "22/tcp": {}
+             }
+        }
+
+    **Example response**:
+
+        HTTP/1.1 201 OK
+        Content-Type: application/json
+
+        {"Id":"596069db4bf5"}
+
+    Json Parameters:
+
+
+
+    -   **config** – the container's configuration
+
+    Query Parameters:
+
+     
+
+    -   **container** – source container
+    -   **repo** – repository
+    -   **tag** – tag
+    -   **m** – commit message
+    -   **author** – author (e.g., "John Hannibal Smith
+        <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+    Status Codes:
+
+    -   **201** – no error
+    -   **404** – no such container
+    -   **500** – server error
+
+### Monitor Docker's events
+
+`GET /events`
+
+Get events from docker, either in real time via streaming, or
+via polling (using since)
+
+    **Example request**:
+
+        GET /events?since=1374067924
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+        {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+        {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
+        {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
+
+    Query Parameters:
+
+     
+
+    -   **since** – timestamp used for polling
+    -   **until** – timestamp used for polling
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **500** – server error
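+
+    For illustration, a Go sketch that polls this endpoint and decodes the
+    resulting JSON stream (the daemon address is an assumption):
+
+        package main
+
+        import (
+            "encoding/json"
+            "fmt"
+            "net/http"
+        )
+
+        func main() {
+            resp, err := http.Get("http://localhost:2375/events?since=1374067924")
+            if err != nil {
+                panic(err)
+            }
+            defer resp.Body.Close()
+
+            // The body is a stream of JSON objects, one per event.
+            dec := json.NewDecoder(resp.Body)
+            for {
+                var ev struct {
+                    Status string
+                    Id     string
+                    From   string
+                    Time   int64
+                }
+                if err := dec.Decode(&ev); err != nil {
+                    break
+                }
+                fmt.Printf("%d %s %s\n", ev.Time, ev.Status, ev.Id)
+            }
+        }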
+
+### Get a tarball containing all images and tags in a repository
+
+`GET /images/(name)/get`
+
+Get a tarball containing all images and metadata for the repository
+specified by `name`.
+
+    **Example request**
+
+        GET /images/ubuntu/get
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/x-tar
+
+        Binary data stream
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **500** – server error
+
+### Load a tarball with a set of images and tags into docker
+
+`POST /images/load`
+
+Load a set of images and tags into the docker repository.
+
+    **Example request**
+
+        POST /images/load
+
+        Tarball in body
+
+    **Example response**:
+
+        HTTP/1.1 200 OK
+
+    Status Codes:
+
+    -   **200** – no error
+    -   **500** – server error
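+
+    Taken together with the previous endpoint, a Go sketch that streams a
+    repository from one daemon into another (both addresses are assumptions):
+
+        package main
+
+        import "net/http"
+
+        const src = "http://host-a:2375"
+        const dst = "http://host-b:2375"
+
+        func main() {
+            // Stream the tarball straight from one daemon into the other.
+            resp, err := http.Get(src + "/images/ubuntu/get")
+            if err != nil {
+                panic(err)
+            }
+            defer resp.Body.Close()
+            if _, err := http.Post(dst+"/images/load",
+                "application/x-tar", resp.Body); err != nil {
+                panic(err)
+            }
+        }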
+
+# 3. Going further
+
+## 3.1 Inside `docker run`
+
+Here are the steps of `docker run`:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it
+    - Then retry to create the container
+
+- Start the container
+
+- If you are not in detached mode:
+    - Attach to the container, using logs=1 (to have stdout and
+      stderr from the container's start) and stream=1
+
+- If in detached mode or only stdin is attached:
+    - Display the container's id
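+
+Put together, a minimal Go sketch of this flow (error handling and JSON
+decoding are elided; the daemon address is an assumption):
+
+    package main
+
+    import (
+        "bytes"
+        "net/http"
+    )
+
+    const daemon = "http://localhost:2375"
+
+    func run(configJSON []byte, image string) {
+        create := func() *http.Response {
+            r, _ := http.Post(daemon+"/containers/create", "application/json",
+                bytes.NewReader(configJSON))
+            return r
+        }
+
+        resp := create()
+        if resp.StatusCode == 404 {
+            // The image doesn't exist: pull it, then retry the create.
+            http.Post(daemon+"/images/create?fromImage="+image, "text/plain", nil)
+            resp = create()
+        }
+
+        // Decode the container Id from resp.Body (elided), then start it.
+        id := "e90e34656806" // placeholder for the decoded Id
+        http.Post(daemon+"/containers/"+id+"/start", "application/json", nil)
+
+        // If not detached: POST /containers/<id>/attach?logs=1&stream=1&stdout=1&stderr=1
+    }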
+
+## 3.2 Hijacking
+
+In this version of the API, `/attach` uses hijacking to transport stdin,
+stdout, and stderr over the same socket. This might change in the future.
+
+## 3.3 CORS Requests
+
+To enable cross-origin requests to the Remote API, add the flag
+`--api-enable-cors` when running docker in daemon mode.
+
+    $ docker -d -H="192.168.1.9:2375" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md
index 37a8e1c..2530fb9 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.2.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.2.md
@@ -206,7 +206,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {}
         }
@@ -589,6 +589,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -699,12 +706,6 @@
         {"error":"Invalid..."}
         ...
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Status Codes:
 
     -   **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md
index b510f66..ff6fcac 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.3.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.3.md
@@ -208,7 +208,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {}
         }
@@ -639,6 +639,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -748,12 +755,6 @@
         {"error":"Invalid..."}
         ...
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Status Codes:
 
     -   **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md
index 0e49402..77d8e15 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.4.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.4.md
@@ -213,7 +213,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {}
         }
@@ -685,6 +685,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -794,12 +801,6 @@
     {"status":"Pushing..."} {"status":"Pushing", "progress":"1/? (n/a)"}
     {"error":"Invalid..."} ...
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Status Codes:
 
     -   **200** – no error :statuscode 404: no such image :statuscode
diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md
index 33c1aec..abf6e33 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.5.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.5.md
@@ -211,7 +211,7 @@
             "Bridge": "",
             "PortMapping": null
           },
-          "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+          "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
           "ResolvConfPath": "/etc/resolv.conf",
           "Volumes": {}
         }
@@ -686,6 +686,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -798,12 +805,6 @@
     The `X-Registry-Auth` header can be used to
     include a base64-encoded AuthConfig object.
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Status Codes:
 
     -   **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md
index 4500c15..11dd452 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.6.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.6.md
@@ -261,7 +261,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {}
         }
@@ -793,6 +793,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -903,12 +910,6 @@
     > The `X-Registry-Auth` header can be used to
     > include a base64-encoded AuthConfig object.
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Status Codes:
 
     -   **200** – no error :statuscode 404: no such image :statuscode
diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md
index 402efa4..10ff841 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.7.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.7.md
@@ -217,7 +217,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {}
         }
@@ -712,6 +712,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is taken
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -821,12 +828,6 @@
         {"error":"Invalid..."}
         ...
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Request Headers:
 
      
diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md
index 78fccaf..b8bc008 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.8.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.8.md
@@ -237,7 +237,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {},
                      "HostConfig": {
@@ -754,6 +754,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is fetched
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -863,12 +870,6 @@
         {"error":"Invalid..."}
         ...
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Request Headers:
 
      
diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md
index 741a9ac..38f4ca8 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.9.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.9.md
@@ -237,7 +237,7 @@
                              "Bridge": "",
                              "PortMapping": null
                      },
-                     "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+                     "SysInitPath": "/home/kitty/go/src/github.com/docker/docker/bin/docker",
                      "ResolvConfPath": "/etc/resolv.conf",
                      "Volumes": {},
                      "HostConfig": {
@@ -758,6 +758,13 @@
         {"error":"Invalid..."}
         ...
 
+    Query Parameters:
+
+    -   **url** – The URL from which the file is fetched
+    -   **path** – The path where the file is stored
+
     Status Codes:
 
     -   **200** – no error
@@ -867,12 +874,6 @@
         {"error":"Invalid..."}
         ...
 
-    Query Parameters:
-
-     
-
-    -   **registry** – the registry you wan to push, optional
-
     Request Headers:
 
      
diff --git a/docs/sources/reference/api/hub_registry_spec.md b/docs/sources/reference/api/hub_registry_spec.md
index 1a2cf94..b2d29ab 100644
--- a/docs/sources/reference/api/hub_registry_spec.md
+++ b/docs/sources/reference/api/hub_registry_spec.md
@@ -35,7 +35,7 @@
    service using tokens
  - It supports different storage backends (S3, cloud files, local FS)
  - It doesn't have a local database
- - [Source Code](https://github.com/dotcloud/docker-registry)
+ - [Source Code](https://github.com/docker/docker-registry)
 
 We expect that there will be multiple registries out there. To help to
 grasp the context, here are some examples of registries:
@@ -479,7 +479,7 @@
     POST /v1/users:
 
     **Body**:
-    {"email": "[sam@dotcloud.com](mailto:sam%40dotcloud.com)",
+    {"email": "[sam@docker.com](mailto:sam%40docker.com)",
     "password": "toto42", "username": "foobar"`}
 
     **Validation**:
diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md
index 2840693..49776b9 100644
--- a/docs/sources/reference/api/registry_api.md
+++ b/docs/sources/reference/api/registry_api.md
@@ -57,7 +57,7 @@
 > **Note**:
 > The latter implies that while HTTP is the protocol of choice for a registry,
 > multiple schemes are possible (and in some cases, trivial):
-> 
+>
 >  - HTTP with GET (and PUT for read-write registries);
 >  - local mount point;
 >  - remote docker addressed through SSH.
@@ -67,6 +67,8 @@
 (and optionally doing consistency checks). Authentication and authorization
 are then delegated to SSH (e.g., with public keys).
 
+The default namespace for a private repository is `library`.
+
 # Endpoints
 
 ## Images
@@ -305,7 +307,7 @@
 
     **Example Request**:
 
-        GET /v1/repositories/foo/bar/tags HTTP/1.1
+        GET /v1/repositories/reynholm/help-system-server/tags HTTP/1.1
         Host: registry-1.docker.io
         Accept: application/json
         Content-Type: application/json
@@ -335,13 +337,13 @@
     - **401** – Requires authorization
     - **404** – Repository not found
 
-`GET /v1/repositories/(namespace)/(repository)/tags/(tag*):
+`GET /v1/repositories/(namespace)/(repository)/tags/(tag*)`
 
 Get a tag for the given repo.
 
     **Example Request**:
 
-        GET /v1/repositories/foo/bar/tags/latest HTTP/1.1
+        GET /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
         Host: registry-1.docker.io
         Accept: application/json
         Content-Type: application/json
@@ -369,13 +371,13 @@
     - **401** – Requires authorization
     - **404** – Tag not found
 
-`DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*):
+`DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*)`
 
 Delete the tag for the repo
 
     **Example Request**:
 
-        DELETE /v1/repositories/foo/bar/tags/latest HTTP/1.1
+        DELETE /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
         Host: registry-1.docker.io
         Accept: application/json
         Content-Type: application/json
@@ -402,13 +404,13 @@
     - **401** – Requires authorization
     - **404** – Tag not found
 
-`PUT /v1/repositories/(namespace)/(repository)/tags/(tag*):
+`PUT /v1/repositories/(namespace)/(repository)/tags/(tag*)`
 
 Put a tag for the given repo.
 
     **Example Request**:
 
-        PUT /v1/repositories/foo/bar/tags/latest HTTP/1.1
+        PUT /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
         Host: registry-1.docker.io
         Accept: application/json
         Content-Type: application/json
@@ -446,7 +448,7 @@
 
     **Example Request**:
 
-        DELETE /v1/repositories/foo/bar/ HTTP/1.1
+        DELETE /v1/repositories/reynholm/help-system-server/ HTTP/1.1
         Host: registry-1.docker.io
         Accept: application/json
         Content-Type: application/json
diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md
index d1d26a1..8f50804 100644
--- a/docs/sources/reference/api/remote_api_client_libraries.md
+++ b/docs/sources/reference/api/remote_api_client_libraries.md
@@ -140,5 +140,17 @@
       <td><a class="reference external" href="https://github.com/spotify/docker-client">https://github.com/spotify/docker-client</a></td>
       <td>Active</td>
     </tr>
+    <tr class="row-odd">
+      <td>Groovy</td>
+      <td>docker-client</td>
+      <td><a class="reference external" href="https://github.com/gesellix-docker/docker-client">https://github.com/gesellix-docker/docker-client</a></td>
+      <td>Active</td>
+    </tr>
+    <tr class="row-even">
+      <td>Java</td>
+      <td>jclouds-docker</td>
+      <td><a class="reference external" href="https://github.com/jclouds/jclouds-labs/tree/master/docker">https://github.com/jclouds/jclouds-labs/tree/master/docker</a></td>
+      <td>Active</td>
+    </tr>
   </tbody>
 </table>
diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md
index 9119093..796d07d 100644
--- a/docs/sources/reference/builder.md
+++ b/docs/sources/reference/builder.md
@@ -4,15 +4,17 @@
 
 # Dockerfile Reference
 
-**Docker can act as a builder** and read instructions from a text *Dockerfile*
-to automate the steps you would otherwise take manually to create an image.
-Executing `docker build` will run your steps and commit them along the way,
-giving you a final image.
+**Docker can build images automatically** by reading the instructions
+from a `Dockerfile`. A `Dockerfile` is a text document that contains all
+the commands you would normally execute manually in order to build a
+Docker image. By calling `docker build` from your terminal, you can have
+Docker build your image step by step, executing the instructions
+successively.
 
 ## Usage
 
 To [*build*](../commandline/cli/#cli-build) an image from a source repository,
-create a description file called Dockerfile at the root of your repository.
+create a description file called `Dockerfile` at the root of your repository.
 This file will describe the steps to assemble the image.
 
 Then call `docker build` with the path of your source repository as the argument
@@ -25,6 +27,19 @@
 whole context must be transferred to the daemon. The Docker CLI reports
 "Sending build context to Docker daemon" when the context is sent to the daemon.
 
+> **Warning**
+> Avoid using your root directory, `/`, as the root of the source repository. The 
+> `docker build` command will use whatever directory contains the Dockerfile as the build
+> context (including all of its subdirectories). The build context will be sent to the
+> Docker daemon before building the image, which means if you use `/` as the source
+> repository, the entire contents of your hard drive will get sent to the daemon (and
+> thus to the machine running the daemon). You probably don't want that.
+
+In most cases, it's best to put each Dockerfile in an empty directory, and then add only
+the files needed for building that Dockerfile to that directory. To further speed up the
+build, you can exclude files and directories by adding a `.dockerignore` file to the same
+directory.
+
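+A minimal `.dockerignore` sketch (the patterns below are only illustrative):
+
+    .git
+    *.log
+    tmp
+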
 You can specify a repository and tag at which to save the new image if
 the build succeeds:
 
@@ -55,13 +70,12 @@
      ---> 1a5ffc17324d
     Successfully built 1a5ffc17324d
 
-When you're done with your build, you're ready to look into
-[*Pushing a repository to its registry*](
-/userguide/dockerrepos/#image-push).
+When you're done with your build, you're ready to look into [*Pushing a
+repository to its registry*]( /userguide/dockerrepos/#image-push).
 
 ## Format
 
-Here is the format of the Dockerfile:
+Here is the format of the `Dockerfile`:
 
     # Comment
     INSTRUCTION arguments
@@ -69,8 +83,8 @@
 The Instruction is not case-sensitive, however convention is for them to
 be UPPERCASE in order to distinguish them from arguments more easily.
 
-Docker evaluates the instructions in a Dockerfile in order. **The first
-instruction must be \`FROM\`** in order to specify the [*Base
+Docker runs the instructions in a `Dockerfile` in order. **The
+first instruction must be \`FROM\`** in order to specify the [*Base
 Image*](/terms/image/#base-image-def) from which you are building.
 
 Docker will treat lines that *begin* with `#` as a
@@ -80,10 +94,10 @@
     # Comment
     RUN echo 'we are running some # of cool things'
 
-Here is the set of instructions you can use in a Dockerfile
-for building images.
+Here is the set of instructions you can use in a `Dockerfile` for building
+images.
 
-## .dockerignore
+## The `.dockerignore` file
 
 If a file named `.dockerignore` exists in the source repository, then it
 is interpreted as a newline-separated list of exclusion patterns.
@@ -124,15 +138,15 @@
     FROM <image>:<tag>
 
 The `FROM` instruction sets the [*Base Image*](/terms/image/#base-image-def)
-for subsequent instructions. As such, a valid Dockerfile must have `FROM` as
+for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as
 its first instruction. The image can be any valid image – it is especially easy
 to start by **pulling an image** from the [*Public Repositories*](
 /userguide/dockerrepos/#using-public-repositories).
 
-`FROM` must be the first non-comment instruction in the Dockerfile.
+`FROM` must be the first non-comment instruction in the `Dockerfile`.
 
-`FROM` can appear multiple times within a single Dockerfile in order to create
-multiple images. Simply make a note of the last image id output by the commit
+`FROM` can appear multiple times within a single `Dockerfile` in order to create
+multiple images. Simply make a note of the last image ID output by the commit
 before each new `FROM` command.
 
 If no `tag` is given to the `FROM` instruction, `latest` is assumed. If the
@@ -154,7 +168,7 @@
 
 The `RUN` instruction will execute any commands in a new layer on top of the
 current image and commit the results. The resulting committed image will be
-used for the next step in the Dockerfile.
+used for the next step in the `Dockerfile`.
 
 Layering `RUN` instructions and generating commits conforms to the core
 concepts of Docker where commits are cheap and containers can be created from
@@ -163,44 +177,52 @@
 The *exec* form makes it possible to avoid shell string munging, and to `RUN`
 commands using a base image that does not contain `/bin/sh`.
 
-The cache for `RUN` instructions isn't invalidated automatically during the
-next build. The cache for an instruction like `RUN apt-get dist-upgrade -y`
-will be reused during the next build.
-The cache for `RUN` instructions can be invalidated by using the `--no-cache`
-flag, for example `docker build --no-cache`.
+> **Note**:
+> To use a different shell, other than '/bin/sh', use the *exec* form
+> passing in the desired shell. For example,
+> `RUN ["/bin/bash", "-c", "echo hello"]`
 
-The first encountered `ADD` instruction will invalidate the cache for all
-following instructions from the 'Dockerfile' if the contents of the context
-have changed. This will also invalidate the cache for `RUN` instructions.
+The cache for `RUN` instructions isn't invalidated automatically during
+the next build. The cache for an instruction like
+`RUN apt-get dist-upgrade -y` will be reused during the next build. The
+cache for `RUN` instructions can be invalidated by using the
+`--no-cache` flag, for example `docker build --no-cache`.
+
+The cache for `RUN` instructions can be invalidated by `ADD` instructions. See
+[below](#add) for details.
 
 ### Known Issues (RUN)
 
-- [Issue 783](https://github.com/dotcloud/docker/issues/783) is about file
+- [Issue 783](https://github.com/docker/docker/issues/783) is about file
   permissions problems that can occur when using the AUFS file system. You
   might notice it during an attempt to `rm` a file, for example. The issue
   describes a workaround.
-- [Issue 2424](https://github.com/dotcloud/docker/issues/2424) Locale will
-  not be set automatically.
 
 ## CMD
 
-CMD has three forms:
+The `CMD` instruction has three forms:
 
 - `CMD ["executable","param1","param2"]` (like an *exec*, this is the preferred form)
 - `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*)
 - `CMD command param1 param2` (as a *shell*)
 
-There can only be one CMD in a Dockerfile. If you list more than one CMD
-then only the last CMD will take effect.
+There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD`
+then only the last `CMD` will take effect.
 
-**The main purpose of a CMD is to provide defaults for an executing
+**The main purpose of a `CMD` is to provide defaults for an executing
 container.** These defaults can include an executable, or they can omit
-the executable, in which case you must specify an ENTRYPOINT as well.
+the executable, in which case you must specify an `ENTRYPOINT`
+instruction as well.
+
+> **Note**:
+> If `CMD` is used to provide default arguments for the `ENTRYPOINT` 
+> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified 
+> with the JSON array format.
 
 When used in the shell or exec formats, the `CMD` instruction sets the command
 to be executed when running the image.
 
-If you use the *shell* form of the CMD, then the `<command>` will execute in
+If you use the *shell* form of the `CMD`, then the `<command>` will execute in
 `/bin/sh -c`:
 
     FROM ubuntu
@@ -208,7 +230,7 @@
 
 If you want to **run your** `<command>` **without a shell** then you must
 express the command as a JSON array and give the full path to the executable.
-**This array form is the preferred format of CMD.** Any additional parameters
+**This array form is the preferred format of `CMD`.** Any additional parameters
 must be individually expressed as strings in the array:
 
     FROM ubuntu
@@ -219,7 +241,7 @@
 [*ENTRYPOINT*](#entrypoint).
 
 If the user specifies arguments to `docker run` then they will override the
-default specified in CMD.
+default specified in `CMD`.
 
 > **Note**:
 > don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
@@ -265,26 +287,30 @@
 `<dest>` is the absolute path to which the source will be copied inside the
 destination container.
 
-All new files and directories are created with a uid and gid of 0.
+All new files and directories are created with a UID and GID of 0.
 
-In the case where `<src>` is a remote file URL, the destination will have permissions 600.
+In the case where `<src>` is a remote file URL, the destination will
+have permissions of 600.
 
 > **Note**:
-> If you build by passing a Dockerfile through STDIN (`docker build - < somefile`),
-> there is no build context, so the Dockerfile can only contain a URL
-> based ADD statement.
+> If you build by passing a `Dockerfile` through STDIN
+> (`docker build - < somefile`), there is no build context, so the
+> `Dockerfile` can only contain a URL-based `ADD` instruction. You can
+> also pass a compressed archive through STDIN
+> (`docker build - < archive.tar.gz`); the `Dockerfile` at the root of
+> the archive and the rest of the archive will be used as the context
+> of the build.
 
-> You can also pass a compressed archive through STDIN:
-> (`docker build - < archive.tar.gz`), the `Dockerfile` at the root of
-> the archive and the rest of the archive will get used at the context
-> of the build.
->
 > **Note**:
-> If your URL files are protected using authentication, you will need to
-> use `RUN wget` , `RUN curl`
-> or use another tool from within the container as ADD does not support
+> If your URL files are protected using authentication, you
+> will need to use `RUN wget`, `RUN curl` or use another tool from
+> within the container as the `ADD` instruction does not support
 > authentication.
 
+> **Note**:
+> The first encountered `ADD` instruction will invalidate the cache for all
+> following instructions from the `Dockerfile` if the contents of `<src>` have
+> changed. This includes invalidating the cache for `RUN` instructions.
+
 The copy obeys the following rules:
 
 - The `<src>` path must be inside the *context* of the build;
@@ -310,9 +336,9 @@
   from *remote* URLs are **not** decompressed. When a directory is copied or
   unpacked, it has the same behavior as `tar -x`: the result is the union of:
 
-    1. whatever existed at the destination path and
-    2. the contents of the source tree, with conflicts resolved in favor of 
-       "2." on a file-by-file basis.
+    1. Whatever existed at the destination path and
+    2. The contents of the source tree, with conflicts resolved in favor
+       of "2." on a file-by-file basis.
 
 - If `<src>` is any other kind of file, it is copied individually along with
   its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
@@ -338,7 +364,7 @@
 `<dest>` is the absolute path to which the source will be copied inside the
 destination container.
 
-All new files and directories are created with a uid and gid of 0.
+All new files and directories are created with a UID and GID of 0.
 
 > **Note**:
 > If you build using STDIN (`docker build - < somefile`), there is no
@@ -370,41 +396,47 @@
 ENTRYPOINT has two forms:
 
 - `ENTRYPOINT ["executable", "param1", "param2"]`
-  (like an *exec*, preferred form)
+  (like an *exec*, the preferred form)
 - `ENTRYPOINT command param1 param2`
   (as a *shell*)
 
-There can only be one `ENTRYPOINT` in a Dockerfile. If you have more than one
-`ENTRYPOINT`, then only the last one in the Dockerfile will have an effect.
+There can only be one `ENTRYPOINT` in a `Dockerfile`. If you have more
+than one `ENTRYPOINT`, then only the last one in the `Dockerfile` will
+have an effect.
 
-An `ENTRYPOINT` helps you to configure a container that you can run as an
-executable. That is, when you specify an `ENTRYPOINT`, then the whole container
-runs as if it was just that executable.
+An `ENTRYPOINT` helps you to configure a container that you can run as
+an executable. That is, when you specify an `ENTRYPOINT`, then the whole
+container runs as if it was just that executable.
 
-The `ENTRYPOINT` instruction adds an entry command that will **not** be
-overwritten when arguments are passed to `docker run`, unlike the behavior
-of `CMD`. This allows arguments to be passed to the entrypoint. i.e. 
-`docker run <image> -d` will pass the "-d" argument to the ENTRYPOINT.
+Unlike the behavior of the `CMD` instruction, the `ENTRYPOINT`
+instruction adds an entry command that will **not** be overwritten when
+arguments are passed to `docker run`. This allows arguments to be passed
+to the entry point, i.e. `docker run <image> -d` will pass the `-d`
+argument to the entry point.
 
-You can specify parameters either in the ENTRYPOINT JSON array (as in
-"like an exec" above), or by using a CMD statement. Parameters in the
-ENTRYPOINT will not be overridden by the `docker run`
-arguments, but parameters specified via CMD will be overridden
-by `docker run` arguments.
+You can specify parameters either in the `ENTRYPOINT` JSON array (as in
+"like an exec" above), or by using a `CMD` instruction. Parameters in
+the `ENTRYPOINT` instruction will not be overridden by the `docker run`
+arguments, but parameters specified via a `CMD` instruction will be
+overridden by `docker run` arguments.
 
-Like a `CMD`, you can specify a plain string for the `ENTRYPOINT` and it will
-execute in `/bin/sh -c`:
+Like a `CMD`, you can specify a plain string for the `ENTRYPOINT` and it
+will execute in `/bin/sh -c`:
 
     FROM ubuntu
-    ENTRYPOINT wc -l -
+    ENTRYPOINT ls -l
 
-For example, that Dockerfile's image will *always* take STDIN as input
-("-") and print the number of lines ("-l"). If you wanted to make this
-optional but default, you could use a CMD:
+For example, that `Dockerfile`'s image will *always* take a directory as
+an input and return a directory listing. If you wanted to make this
+optional but default, you could use a `CMD` instruction:
 
     FROM ubuntu
-    CMD ["-l", "-"]
-    ENTRYPOINT ["/usr/bin/wc"]
+    CMD ["-l"]
+    ENTRYPOINT ["ls"]
+
+> **Note**:
+> It is preferable to use the JSON array format for specifying
+> `ENTRYPOINT` instructions.
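+
+For example, if the image above were built and tagged as `lister` (a
+hypothetical name), the default parameters could be overridden at run time:
+
+    $ sudo docker run lister        # runs "ls -l"
+    $ sudo docker run lister -a     # runs "ls -a", overriding the default CMD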
 
 ## VOLUME
 
@@ -421,34 +453,37 @@
 
     USER daemon
 
-The `USER` instruction sets the username or UID to use when running the image
+The `USER` instruction sets the user name or UID to use when running the image
 and for any following `RUN` directives.
 
 ## WORKDIR
 
     WORKDIR /path/to/workdir
 
-The `WORKDIR` instruction sets the working directory for the `RUN`, `CMD` and
-`ENTRYPOINT` Dockerfile commands that follow it.
+The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD` and
+`ENTRYPOINT` instructions that follow it in the `Dockerfile`.
 
-It can be used multiple times in the one Dockerfile. If a relative path
+It can be used multiple times in the one `Dockerfile`. If a relative path
 is provided, it will be relative to the path of the previous `WORKDIR`
 instruction. For example:
 
-    WORKDIR /a WORKDIR b WORKDIR c RUN pwd
+    WORKDIR /a
+    WORKDIR b
+    WORKDIR c
+    RUN pwd
 
-The output of the final `pwd` command in this
-Dockerfile would be `/a/b/c`.
+The output of the final `pwd` command in this `Dockerfile` would be
+`/a/b/c`.
 
 ## ONBUILD
 
     ONBUILD [INSTRUCTION]
 
-The `ONBUILD` instruction adds to the image a
-"trigger" instruction to be executed at a later time, when the image is
-used as the base for another build. The trigger will be executed in the
-context of the downstream build, as if it had been inserted immediately
-after the *FROM* instruction in the downstream Dockerfile.
+The `ONBUILD` instruction adds to the image a *trigger* instruction to
+be executed at a later time, when the image is used as the base for
+another build. The trigger will be executed in the context of the
+downstream build, as if it had been inserted immediately after the
+`FROM` instruction in the downstream `Dockerfile`.
 
 Any build instruction can be registered as a trigger.
 
@@ -456,33 +491,33 @@
 to build other images, for example an application build environment or a
 daemon which may be customized with user-specific configuration.
 
-For example, if your image is a reusable python application builder, it
+For example, if your image is a reusable Python application builder, it
 will require application source code to be added in a particular
 directory, and it might require a build script to be called *after*
-that. You can't just call *ADD* and *RUN* now, because you don't yet
+that. You can't just call `ADD` and `RUN` now, because you don't yet
 have access to the application source code, and it will be different for
 each application build. You could simply provide application developers
-with a boilerplate Dockerfile to copy-paste into their application, but
+with a boilerplate `Dockerfile` to copy-paste into their application, but
 that is inefficient, error-prone and difficult to update because it
 mixes with application-specific code.
 
-The solution is to use *ONBUILD* to register in advance instructions to
+The solution is to use `ONBUILD` to register in advance instructions to
 run later, during the next build stage.
 
 Here's how it works:
 
-1. When it encounters an *ONBUILD* instruction, the builder adds a
+1. When it encounters an `ONBUILD` instruction, the builder adds a
    trigger to the metadata of the image being built. The instruction
    does not otherwise affect the current build.
 2. At the end of the build, a list of all triggers is stored in the
-   image manifest, under the key *OnBuild*. They can be inspected with
-   *docker inspect*.
+   image manifest, under the key `OnBuild`. They can be inspected with
+   the `docker inspect` command.
 3. Later the image may be used as a base for a new build, using the
-   *FROM* instruction. As part of processing the *FROM* instruction,
-   the downstream builder looks for *ONBUILD* triggers, and executes
+   `FROM` instruction. As part of processing the `FROM` instruction,
+   the downstream builder looks for `ONBUILD` triggers, and executes
    them in the same order they were registered. If any of the triggers
-   fail, the *FROM* instruction is aborted which in turn causes the
-   build to fail. If all triggers succeed, the FROM instruction
+   fail, the `FROM` instruction is aborted which in turn causes the
+   build to fail. If all triggers succeed, the `FROM` instruction
    completes and the build continues as usual.
 4. Triggers are cleared from the final image after being executed. In
    other words they are not inherited by "grand-children" builds.
@@ -494,9 +529,9 @@
     ONBUILD RUN /usr/local/bin/python-build --dir /app/src
     [...]
 
-> **Warning**: Chaining ONBUILD instructions using ONBUILD ONBUILD isn't allowed.
+> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed.
 
-> **Warning**: ONBUILD may not trigger FROM or MAINTAINER instructions.
+> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions.
 
 ## Dockerfile Examples
 
@@ -507,23 +542,16 @@
     FROM      ubuntu
     MAINTAINER Victor Vieux <victor@docker.com>
 
-    # make sure the package repository is up to date
-    RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
-    RUN apt-get update
-
-    RUN apt-get install -y inotify-tools nginx apache2 openssh-server
+    RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server
 
     # Firefox over VNC
     #
     # VERSION               0.3
 
     FROM ubuntu
-    # make sure the package repository is up to date
-    RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
-    RUN apt-get update
 
     # Install vnc, xvfb in order to create a 'fake' display and firefox
-    RUN apt-get install -y x11vnc xvfb firefox
+    RUN apt-get update && apt-get install -y x11vnc xvfb firefox
     RUN mkdir /.vnc
     # Setup a password
     RUN x11vnc -storepasswd 1234 ~/.vnc/passwd
@@ -547,3 +575,4 @@
 
     # You᾿ll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with
     # /oink.
+
diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md
index 301593f..7b9e2ab 100644
--- a/docs/sources/reference/commandline/cli.md
+++ b/docs/sources/reference/commandline/cli.md
@@ -65,15 +65,14 @@
       -H, --host=[]                              The socket(s) to bind to in daemon mode
                                                    specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
       --icc=true                                 Enable inter-container communication
-      --ip="0.0.0.0"                             Default IP address to use when binding container ports
+      --ip=0.0.0.0                               Default IP address to use when binding container ports
       --ip-forward=true                          Enable net.ipv4.ip_forward
       --iptables=true                            Enable Docker's addition of iptables rules
       --mtu=0                                    Set the containers network MTU
                                                    if no value is provided: default to the default route MTU or 1500 if no default route is available
       -p, --pidfile="/var/run/docker.pid"        Path to use for daemon PID file
-      -r, --restart=true                         Restart previously running containers
       -s, --storage-driver=""                    Force the Docker runtime to use a specific storage driver
-      --selinux-enabled=false                    Enable selinux support
+      --selinux-enabled=false                    Enable selinux support. SELinux does not presently support the BTRFS storage driver
       --storage-opt=[]                           Set storage driver options
       --tls=false                                Use TLS; implied by tls-verify flags
       --tlscacert="/home/sven/.docker/ca.pem"    Trust only remotes providing a certificate signed by the CA given here
@@ -117,15 +116,14 @@
 specified socket activated files aren't found then docker will exit. You
 can find examples of using systemd socket activation with docker and
 systemd in the [docker source tree](
-https://github.com/dotcloud/docker/blob/master/contrib/init/systemd/socket-activation/).
+https://github.com/docker/docker/blob/master/contrib/init/systemd/socket-activation/).
 
 Docker supports softlinks for the Docker data directory
-(`/var/lib/docker`) and for `/tmp`. TMPDIR and the data directory can be set
-like this:
+(`/var/lib/docker`) and for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be set like this:
 
-    TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1
+    DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1
     # or
-    export TMPDIR=/mnt/disk2/tmp
+    export DOCKER_TMPDIR=/mnt/disk2/tmp
     /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1
 
 ## attach
@@ -135,7 +133,7 @@
     Attach to a running container
 
       --no-stdin=false    Do not attach STDIN
-      --sig-proxy=true    Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied.
+      --sig-proxy=true    Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.
 
 The `attach` command will allow you to view or
 interact with any running container, detached (`-d`)
@@ -324,7 +322,7 @@
 
 > **Note:** `docker build` will return a `no such file or directory` error
 > if the file or directory does not exist in the uploaded context. This may
-> happen if there is no context, or if you specify a file that is elsewhere 
+> happen if there is no context, or if you specify a file that is elsewhere
 > on the Host system. The context is limited to the current directory (and its
 > children) for security reasons, and to ensure repeatable builds on remote
 > Docker hosts. This is also the reason why `ADD ../file` will not work.
@@ -396,9 +394,9 @@
     A /go
     A /go/src
     A /go/src/github.com
-    A /go/src/github.com/dotcloud
-    A /go/src/github.com/dotcloud/docker
-    A /go/src/github.com/dotcloud/docker/.git
+    A /go/src/github.com/docker
+    A /go/src/github.com/docker/docker
+    A /go/src/github.com/docker/docker/.git
     ....
 
 ## events
@@ -425,24 +423,24 @@
 
 **Shell 1: (Again .. now showing events):**
 
-    [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) start
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die
+    2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop
 
 **Show events in the past from a specified time:**
 
     $ sudo docker events --since 1378216169
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
+    2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die
+    2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop
 
     $ sudo docker events --since '2013-09-03'
-    [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
+    2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) start
+    2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die
+    2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop
 
     $ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST'
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
-    [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
+    2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from 12de384bfb10) die
+    2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from 12de384bfb10) stop
 
 ## export
 
@@ -500,7 +498,7 @@
     <none>                        <none>              77af4d6b9913        19 hours ago        1.089 GB
     committest                    latest              b6fa739cedf5        19 hours ago        1.089 GB
     <none>                        <none>              78a85c484f71        19 hours ago        1.089 GB
-    $ docker                        latest              30557a29d5ab        20 hours ago        1.089 GB
+    docker                        latest              30557a29d5ab        20 hours ago        1.089 GB
     <none>                        <none>              0124422dd9f9        20 hours ago        1.089 GB
     <none>                        <none>              18ad6fad3402        22 hours ago        1.082 GB
     <none>                        <none>              f9f1e26352f0        23 hours ago        1.089 GB
@@ -514,7 +512,7 @@
     <none>                        <none>              77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182   19 hours ago        1.089 GB
     committest                    latest              b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f   19 hours ago        1.089 GB
     <none>                        <none>              78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921   19 hours ago        1.089 GB
-    $ docker                        latest              30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4   20 hours ago        1.089 GB
+    docker                        latest              30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4   20 hours ago        1.089 GB
     <none>                        <none>              0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5   20 hours ago        1.089 GB
     <none>                        <none>              18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b   22 hours ago        1.082 GB
     <none>                        <none>              f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a   23 hours ago        1.089 GB
@@ -605,18 +603,18 @@
 For example:
 
     $ sudo docker -D info
-    Containers: 16
-    Images: 2138
+    Containers: 14
+    Images: 52
     Storage Driver: btrfs
-    Execution Driver: native-0.1
-    Kernel Version: 3.12.0-1-amd64
+    Execution Driver: native-0.2
+    Kernel Version: 3.13.0-24-generic
+    Operating System: Ubuntu 14.04 LTS
     Debug mode (server): false
     Debug mode (client): true
-    Fds: 16
-    Goroutines: 104
+    Fds: 10
+    Goroutines: 9
     EventsListeners: 0
     Init Path: /usr/bin/docker
-    Sockets: [unix:///var/run/docker.sock tcp://0.0.0.0:4243]
     Username: svendowideit
     Registry: [https://index.docker.io/v1/]
 
@@ -732,6 +730,16 @@
     example:
     $ docker login localhost:8080
 
+## logout
+
+    Usage: docker logout [SERVER]
+
+    Log out from a Docker registry. If no server is specified "https://index.docker.io/v1/" is the default.
+
+For example:
+
+    $ docker logout localhost:8080
+
 ## logs
 
     Usage: docker logs CONTAINER
@@ -750,12 +758,32 @@
 Passing a negative number or a non-integer to `--tail` is invalid and the
 value is set to `all` in that case. This behavior may change in the future.
 
+The `docker logs --timestamps` command will add an RFC3339Nano
+timestamp, for example `2014-05-10T17:42:14.999999999Z07:00`, to each
+log entry.
+
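+For example (the container name `web` is hypothetical):
+
+    $ sudo docker logs --timestamps web
+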
 ## port
 
     Usage: docker port CONTAINER PRIVATE_PORT
 
     Lookup the public-facing port that is NAT-ed to PRIVATE_PORT
 
+## pause
+
+    Usage: docker pause CONTAINER
+
+    Pause all processes within a container
+
+The `docker pause` command uses the cgroups freezer to suspend all processes in
+a container. Traditionally, when suspending a process, the `SIGSTOP` signal is
+used, which is observable by the process being suspended. With the cgroups
+freezer, the process is unaware that it is being suspended (and subsequently
+resumed) and cannot capture the transition.
+
+See the [cgroups freezer documentation]
+(https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for
+further details.
+
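+For example, to suspend a hypothetical container named `web`:
+
+    $ sudo docker pause web
+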
 ## ps
 
     Usage: docker ps [OPTIONS]
@@ -764,6 +792,8 @@
 
       -a, --all=false       Show all containers. Only running containers are shown by default.
       --before=""           Show only container created before Id or Name, include non-running ones.
+      -f, --filter=[]       Provide filter values. Valid filters:
+                              exited=<int> - containers with exit code of <int>
       -l, --latest=false    Show only the latest created container, include non-running ones.
       -n=-1                 Show n last created containers, include non-running ones.
       --no-trunc=false      Don't truncate output
@@ -781,6 +811,25 @@
 `docker ps` will show only running containers by default. To see all containers:
 `docker ps -a`
 
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is a "key=value" pair. If
+there is more than one filter, then pass multiple flags (e.g.
+`--filter "foo=bar" --filter "bif=baz"`).
+
+Current filters:
+ * exited (int - the exit code of exited containers. Only useful with `--all`)
+
+#### Successfully exited containers
+
+    $ sudo docker ps -a --filter 'exited=0'
+    CONTAINER ID        IMAGE             COMMAND                CREATED             STATUS                   PORTS                      NAMES
+    ea09c3c82f6e        registry:latest   /srv/run.sh            2 weeks ago         Exited (0) 2 weeks ago   127.0.0.1:5000->5000/tcp   desperate_leakey
+    106ea823fe4e        fedora:latest     /bin/sh -c 'bash -l'   2 weeks ago         Exited (0) 2 weeks ago                              determined_albattani
+    48ee228c9464        fedora:20         bash                   2 weeks ago         Exited (0) 2 weeks ago                              tender_torvalds
+
+This shows all the containers that have exited with a status of `0`.
+
 ## pull
 
     Usage: docker pull NAME[:TAG]
@@ -834,13 +883,13 @@
 
     Remove one or more containers
 
-      -f, --force=false      Force removal of running container
+      -f, --force=false      Force the removal of a running container (uses SIGKILL)
       -l, --link=false       Remove the specified link and not the underlying container
       -v, --volumes=false    Remove the volumes associated with the container
 
 ### Known Issues (rm)
 
--   [Issue 197](https://github.com/dotcloud/docker/issues/197) indicates
+-   [Issue 197](https://github.com/docker/docker/issues/197) indicates
     that `docker kill` may leave directories behind
     and make it difficult to remove the container.
 
@@ -859,7 +908,12 @@
 and the `/redis` containers removing all
 network communication.
 
-    $ sudo docker rm $(docker ps -a -q)
+    $ sudo docker rm --force redis
+    redis
+
+The main process inside the container referenced under the link `/redis` will receive
+SIGKILL, then the container will be removed.
+
 
 This command will delete all stopped containers. The command
 `docker ps -a -q` will return all existing container
@@ -909,20 +963,23 @@
 
     Run a command in a new container
 
-      -a, --attach=[]            Attach to stdin, stdout or stderr.
+      -a, --attach=[]            Attach to STDIN, STDOUT or STDERR.
       -c, --cpu-shares=0         CPU shares (relative weight)
+      --cap-add=[]               Add Linux capabilities
+      --cap-drop=[]              Drop Linux capabilities
       --cidfile=""               Write the container ID to the file
       --cpuset=""                CPUs in which to allow execution (0-3, 0,1)
-      -d, --detach=false         Detached mode: Run container in the background, print new container id
-      --dns=[]                   Set custom dns servers
-      --dns-search=[]            Set custom dns search domains
+      -d, --detach=false         Detached mode: run container in the background and print new container ID
+      --device=[]                Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)
+      --dns=[]                   Set custom DNS servers
+      --dns-search=[]            Set custom DNS search domains
       -e, --env=[]               Set environment variables
-      --entrypoint=""            Overwrite the default entrypoint of the image
-      --env-file=[]              Read in a line delimited file of ENV variables
+      --entrypoint=""            Overwrite the default ENTRYPOINT of the image
+      --env-file=[]              Read in a line delimited file of environment variables
       --expose=[]                Expose a port from the container without publishing it to your host
       -h, --hostname=""          Container host name
-      -i, --interactive=false    Keep stdin open even if not attached
-      --link=[]                  Add link to another container (name:alias)
+      -i, --interactive=false    Keep STDIN open even if not attached
+      --link=[]                  Add link to another container in the form of name:alias
       --lxc-conf=[]              (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
       -m, --memory=""            Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
       --name=""                  Assign a name to the container
@@ -936,11 +993,12 @@
                                    format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort
                                    (use 'docker port' to see the actual mapping)
       --privileged=false         Give extended privileges to this container
+      --restart=""               Restart policy to apply when a container exits (no, on-failure, always)
       --rm=false                 Automatically remove the container when it exits (incompatible with -d)
-      --sig-proxy=true           Proxify received signals to the process (even in non-tty mode). SIGCHLD is not proxied.
-      -t, --tty=false            Allocate a pseudo-tty
+      --sig-proxy=true           Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.
+      -t, --tty=false            Allocate a pseudo-TTY
       -u, --user=""              Username or UID
-      -v, --volume=[]            Bind mount a volume (e.g., from the host: -v /host:/container, from docker: -v /container)
+      -v, --volume=[]            Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)
       --volumes-from=[]          Mount volumes from the specified container(s)
       -w, --workdir=""           Working directory inside the container
 
@@ -960,7 +1018,7 @@
 
 ### Known Issues (run –volumes-from)
 
-- [Issue 2702](https://github.com/dotcloud/docker/issues/2702):
+- [Issue 2702](https://github.com/docker/docker/issues/2702):
   "lxc-start: Permission denied - failed to mount" could indicate a
   permissions problem with AppArmor. Please see the issue for a
   workaround.
@@ -1087,14 +1145,14 @@
 The `--name` flag will assign the name `console` to the newly created
 container.
 
-    $ sudo docker run --volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
+    $ sudo docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
 
 The `--volumes-from` flag mounts all the defined volumes from the referenced
-containers. Containers can be specified by a comma separated list or by
-repetitions of the `--volumes-from` argument. The container ID may be
-optionally suffixed with `:ro` or `:rw` to mount the volumes in read-only
-or read-write mode, respectively. By default, the volumes are mounted in
-the same mode (read write or read only) as the reference container.
+containers. Containers can be specified by repetitions of the `--volumes-from`
+argument. The container ID may be optionally suffixed with `:ro` or `:rw` to
+mount the volumes in read-only or read-write mode, respectively. By default,
+the volumes are mounted in the same mode (read write or read only) as
+the reference container.
 
 The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` or
 `STDERR`. This makes it possible to manipulate the output and input as needed.
@@ -1118,6 +1176,20 @@
 useful if you need to pipe a file or something else into a container and
 retrieve the container's ID once the container has finished running.
 
+It is often necessary to directly expose devices to a container. The
+`--device` option enables that. For example, a specific block storage
+device, loop device, or audio device can be added to an otherwise
+unprivileged container (without the `--privileged` flag) and have the
+application directly access it:
+
+    $ sudo docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo}
+    brw-rw---- 1 root disk 8, 2 Feb  9 16:05 /dev/xvdc
+    brw-rw---- 1 root disk 8, 3 Feb  9 16:05 /dev/sdd
+    crw-rw-rw- 1 root root 1, 5 Feb  9 16:05 /dev/nulo
+
+> **Security note**:
+> The `--device` option cannot be safely used with ephemeral devices. Block
+> devices that may be removed should not be added to untrusted containers
+> with `--device`!
+
 **A complete example:**
 
     $ sudo docker run -d --name static static-web-files sh
@@ -1149,6 +1221,31 @@
    `--rm` option means that when the container exits, the container's layer is
    removed.
 
+#### Restart Policies
+
+Using the `--restart` flag on `docker run` you can specify a restart policy
+for how a container should or should not be restarted on exit.
+
+**no** - Do not restart the container when it exits.
+
+**on-failure** - Restart the container only if it exits with a non-zero exit status.
+
+**always** - Always restart the container regardless of the exit status.
+
+You can also specify the maximum number of times Docker will try to restart
+the container when using the **on-failure** policy. The default is that
+Docker will try forever to restart the container.
+
+    $ sudo docker run --restart=always redis
+
+This will run the `redis` container with a restart policy of **always**, so
+if the container exits, Docker will restart it.
+
+    $ sudo docker run --restart=on-failure:10 redis
+
+This will run the `redis` container with a restart policy of **on-failure**
+and a maximum restart count of 10. If the `redis` container exits with a
+non-zero exit status more than 10 times in a row, Docker will abort trying
+to restart it.
+
 ## save
 
     Usage: docker save IMAGE
@@ -1197,7 +1294,7 @@
       -a, --attach=false         Attach container's STDOUT and STDERR and forward all signals to the process
       -i, --interactive=false    Attach container's STDIN
 
-When run on a container that has already been started, 
+When run on a container that has already been started,
 takes no action and succeeds unconditionally.
 
 ## stop
@@ -1213,7 +1310,7 @@
 
 ## tag
 
-    Usage: docker tag [OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+    Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]
 
     Tag an image into a repository
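+
+For example (the registry host and names here are illustrative):
+
+    $ sudo docker tag ubuntu:14.04 myregistry.example.com:5000/myuser/ubuntu:stable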
 
@@ -1229,6 +1326,19 @@
 
     Display the running processes of a container
 
+## unpause
+
+    Usage: docker unpause CONTAINER
+
+    Unpause all processes within a container
+
+The `docker unpause` command uses the cgroups freezer to un-suspend all
+processes in a container.
+
+See the [cgroups freezer documentation]
+(https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for
+further details.
+
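+For example, to resume a hypothetical container named `web`:
+
+    $ sudo docker unpause web
+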
 ## version
 
     Usage: docker version
diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md
index a539ab0..a933a32 100644
--- a/docs/sources/reference/run.md
+++ b/docs/sources/reference/run.md
@@ -11,7 +11,7 @@
 defaults related to the binary to run, the networking to expose, and
 more, but `docker run` gives final control to the operator who starts
 the container from the image. That's the main reason
-[*run*](/reference/commandline/cli/#cli-run) has more options than any
+[*run*](/reference/commandline/cli/#run) has more options than any
 other `docker` command.
 
 ## General Form
@@ -21,23 +21,21 @@
     $ docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]
 
 To learn how to interpret the types of `[OPTIONS]`,
-see [*Option types*](/reference/commandline/cli/#cli-options).
+see [*Option types*](/reference/commandline/cli/#option-types).
 
 The list of `[OPTIONS]` breaks down into two groups:
 
 1. Settings exclusive to operators, including:
-
- - Detached or Foreground running,
- - Container Identification,
- - Network settings, and
- - Runtime Constraints on CPU and Memory
- - Privileges and LXC Configuration
-
-2. Setting shared between operators and developers, where operators can
+     * Detached or Foreground running,
+     * Container Identification,
+     * Network settings, and
+     * Runtime Constraints on CPU and Memory
+     * Privileges and LXC Configuration
+2. Settings shared between operators and developers, where operators can
    override defaults developers set in images at build time.
 
-Together, the `docker run [OPTIONS]` give complete control over runtime
-behavior to the operator, allowing them to override all defaults set by
+Together, the `docker run [OPTIONS]` give the operator complete control over runtime
+behavior, allowing them to override all defaults set by
 the developer during `docker build` and nearly all the defaults set by
 the Docker runtime itself.
 
@@ -55,7 +53,7 @@
  - [Network Settings](#network-settings)
  - [Clean Up (--rm)](#clean-up-rm)
  - [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory)
- - [Runtime Privilege and LXC Configuration](#runtime-privilege-and-lxc-configuration)
+ - [Runtime Privilege, Linux Capabilities, and LXC Configuration](#runtime-privilege-linux-capabilities-and-lxc-configuration)
 
 ## Detached vs Foreground
 
@@ -88,7 +86,7 @@
     -i=false        : Keep STDIN open even if not attached
 
 If you do not specify `-a` then Docker will [attach all standard
-streams]( https://github.com/dotcloud/docker/blob/
+streams]( https://github.com/docker/docker/blob/
 75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797). You can
 specify to which of the three standard streams (`STDIN`, `STDOUT`,
 `STDERR`) you'd like to connect instead, as in:
@@ -126,6 +124,12 @@
 PID files):
 
     --cidfile="": Write the container ID to the file
+
+### Image[:tag]
+
+While not strictly a means of identifying a container, you can specify a version of an
+image you'd like to run the container with by adding `image[:tag]` to the command. For
+example, `docker run ubuntu:14.04`.
 
 ## Network Settings
 
@@ -222,8 +226,10 @@
 give more shares of CPU time to one or more containers when you start
 them via Docker.
 
-## Runtime Privilege and LXC Configuration
+## Runtime Privilege, Linux Capabilities, and LXC Configuration
 
+    --cap-add: Add Linux capabilities
+    --cap-drop: Drop Linux capabilities
     --privileged=false: Give extended privileges to this container
     --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
 
@@ -231,7 +237,7 @@
 example, run a Docker daemon inside a Docker container. This is because
 by default a container is not allowed to access any devices, but a
 "privileged" container is given access to all devices (see [lxc-template.go](
-https://github.com/dotcloud/docker/blob/master/daemon/execdriver/lxc/lxc_template.go)
+https://github.com/docker/docker/blob/master/daemon/execdriver/lxc/lxc_template.go)
 and documentation on [cgroups devices](
 https://www.kernel.org/doc/Documentation/cgroups/devices.txt)).
 
@@ -242,11 +248,21 @@
 information about running with `--privileged` is available on the
 [Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/).
 
+In addition to `--privileged`, the operator can have fine-grained control over
+the capabilities using `--cap-add` and `--cap-drop`. Docker keeps a default
+list of capabilities. Both flags support the value `all`, so if the
+operator wants to have all capabilities but `MKNOD` they could use:
+
+    $ docker run --cap-add=ALL --cap-drop=MKNOD ...
+
+For interacting with the network stack, instead of using `--privileged` the
+operator should use `--cap-add=NET_ADMIN` to modify the network interfaces.
+
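+For instance, a sketch of changing an interface's MTU with only the
+`NET_ADMIN` capability added (assuming the image provides the `ip` tool):
+
+    $ docker run --cap-add=NET_ADMIN ubuntu ip link set eth0 mtu 1400
+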
 If the Docker daemon was started using the `lxc` exec-driver
 (`docker -d --exec-driver=lxc`) then the operator can also specify LXC options
 using one or more `--lxc-conf` parameters. These can be new parameters or
 override existing parameters from the [lxc-template.go](
-https://github.com/dotcloud/docker/blob/master/daemon/execdriver/lxc/lxc_template.go).
+https://github.com/docker/docker/blob/master/daemon/execdriver/lxc/lxc_template.go).
 Note that in the future, a given host's docker daemon may not use LXC, so this
 is an implementation-specific configuration meant for operators already
 familiar with using LXC directly.
@@ -385,7 +401,7 @@
     $ docker port 4241164edf6f 6379
     2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f
 
-Yet we can get information about the Redis container'sexposed ports
+Yet we can get information about the Redis container's exposed ports
 with `--link`. Choose an alias that will form a
 valid environment variable!
 
@@ -423,8 +439,8 @@
     --volumes-from="": Mount all volumes from the given container(s)
 
 The volumes commands are complex enough to have their own documentation
-in section [*Share Directories via
-Volumes*](/userguide/dockervolumes/#volume-def).  A developer can define
+in section [*Managing data in 
+containers*](/userguide/dockervolumes/#volume-def). A developer can define
 one or more `VOLUME`'s associated with an image, but only the operator
 can give access from one container to another (or from a container to a
 volume mounted on the host).
diff --git a/docs/sources/userguide/dockerhub.md b/docs/sources/userguide/dockerhub.md
index 5bb1ede..62438b9 100644
--- a/docs/sources/userguide/dockerhub.md
+++ b/docs/sources/userguide/dockerhub.md
@@ -5,8 +5,8 @@
 # Getting Started with Docker Hub
 
 
-This section provides a quick introduction to the [Docker Hub](https://hub.docker.com)
-and will show you how to create an account.
+This section provides a quick introduction to the [Docker Hub](https://hub.docker.com),
+including how to create an account.
 
 The [Docker Hub](https://hub.docker.com) is a centralized resource for working with
 Docker and its components. Docker Hub helps you collaborate with colleagues and get the
@@ -23,7 +23,7 @@
 
 ## Creating a Docker Hub Account
 
-There are two ways for you to register and create a Docker Hub account:
+There are two ways for you to register and create an account:
 
 1. Via the web, or
 2. Via the command line.
@@ -31,9 +31,9 @@
 ### Register via the web
 
 Fill in the [sign-up form](https://hub.docker.com/account/signup/) by
-choosing your user name and password and specifying email address. You can also sign up
-for the Docker Weekly mailing list, which has lots of information about what's going on
-in the world of Docker.
+choosing your user name and password and entering a valid email address. You can also
+sign up for the Docker Weekly mailing list, which has lots of information about what's
+going on in the world of Docker.
 
 ![Register using the sign-up page](/userguide/register-web.png)
 
@@ -46,10 +46,9 @@
 
 ### Confirm your email
 
-Once you've filled in the form, check your email for a welcome message and confirmation
-to activate your account.
+Once you've filled in the form, check your email for a welcome message asking for
+confirmation so we can activate your account.
 
-![Confirm your registration](/userguide/register-confirm.png)
 
 ### Login
 
@@ -61,7 +60,7 @@
 
     $ sudo docker login
 
-Your Docker Hub account is now active and ready for you to use!
+Your Docker Hub account is now active and ready to use.
 
 ##  Next steps
 
diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md
index c3f5461..e6583a0 100644
--- a/docs/sources/userguide/dockerimages.md
+++ b/docs/sources/userguide/dockerimages.md
@@ -130,7 +130,7 @@
 returned a list of image names, descriptions, Stars (which measure the social
 popularity of images - if a user likes an image then they can "star" it), and
 the Official and Automated build statuses. Official repositories are built and
-maintained by the [Stackbrew](https://github.com/dotcloud/stackbrew) project,
+maintained by the [Stackbrew](https://github.com/docker/stackbrew) project,
 and Automated repositories are [Automated Builds](
 /userguide/dockerrepos/#automated-builds) that allow you to validate the source
 and content of an image.
@@ -245,8 +245,7 @@
     # This is a comment
     FROM ubuntu:14.04
     MAINTAINER Kate Smith <ksmith@example.com>
-    RUN apt-get -qq update
-    RUN apt-get -qqy install ruby ruby-dev
+    RUN apt-get update && apt-get install -y ruby ruby-dev
     RUN gem install sinatra
 
 Let's look at what our `Dockerfile` does. Each line begins with a capitalized instruction followed by its arguments.
@@ -272,38 +271,168 @@
 Now let's take our `Dockerfile` and use the `docker build` command to build an image.
 
     $ sudo docker build -t="ouruser/sinatra:v2" .
-    Uploading context  2.56 kB
-    Uploading context
+    Sending build context to Docker daemon 2.048 kB
+    Sending build context to Docker daemon 
     Step 0 : FROM ubuntu:14.04
-     ---> 99ec81b80c55
+     ---> e54ca5efa2e9
     Step 1 : MAINTAINER Kate Smith <ksmith@example.com>
-     ---> Running in 7c5664a8a0c1
-     ---> 2fa8ca4e2a13
-    Removing intermediate container 7c5664a8a0c1
-    Step 2 : RUN apt-get -qq update
-     ---> Running in b07cc3fb4256
-     ---> 50d21070ec0c
-    Removing intermediate container b07cc3fb4256
-    Step 3 : RUN apt-get -qqy install ruby ruby-dev
-     ---> Running in a5b038dd127e
+     ---> Using cache
+     ---> 851baf55332b
+    Step 2 : RUN apt-get update && apt-get install -y ruby ruby-dev
+     ---> Running in 3a2558904e9b
     Selecting previously unselected package libasan0:amd64.
     (Reading database ... 11518 files and directories currently installed.)
     Preparing to unpack .../libasan0_4.8.2-19ubuntu1_amd64.deb ...
-    . . .
+    Unpacking libasan0:amd64 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package libatomic1:amd64.
+    Preparing to unpack .../libatomic1_4.8.2-19ubuntu1_amd64.deb ...
+    Unpacking libatomic1:amd64 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package libgmp10:amd64.
+    Preparing to unpack .../libgmp10_2%3a5.1.3+dfsg-1ubuntu1_amd64.deb ...
+    Unpacking libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ...
+    Selecting previously unselected package libisl10:amd64.
+    Preparing to unpack .../libisl10_0.12.2-1_amd64.deb ...
+    Unpacking libisl10:amd64 (0.12.2-1) ...
+    Selecting previously unselected package libcloog-isl4:amd64.
+    Preparing to unpack .../libcloog-isl4_0.18.2-1_amd64.deb ...
+    Unpacking libcloog-isl4:amd64 (0.18.2-1) ...
+    Selecting previously unselected package libgomp1:amd64.
+    Preparing to unpack .../libgomp1_4.8.2-19ubuntu1_amd64.deb ...
+    Unpacking libgomp1:amd64 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package libitm1:amd64.
+    Preparing to unpack .../libitm1_4.8.2-19ubuntu1_amd64.deb ...
+    Unpacking libitm1:amd64 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package libmpfr4:amd64.
+    Preparing to unpack .../libmpfr4_3.1.2-1_amd64.deb ...
+    Unpacking libmpfr4:amd64 (3.1.2-1) ...
+    Selecting previously unselected package libquadmath0:amd64.
+    Preparing to unpack .../libquadmath0_4.8.2-19ubuntu1_amd64.deb ...
+    Unpacking libquadmath0:amd64 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package libtsan0:amd64.
+    Preparing to unpack .../libtsan0_4.8.2-19ubuntu1_amd64.deb ...
+    Unpacking libtsan0:amd64 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package libyaml-0-2:amd64.
+    Preparing to unpack .../libyaml-0-2_0.1.4-3ubuntu3_amd64.deb ...
+    Unpacking libyaml-0-2:amd64 (0.1.4-3ubuntu3) ...
+    Selecting previously unselected package libmpc3:amd64.
+    Preparing to unpack .../libmpc3_1.0.1-1ubuntu1_amd64.deb ...
+    Unpacking libmpc3:amd64 (1.0.1-1ubuntu1) ...
+    Selecting previously unselected package openssl.
+    Preparing to unpack .../openssl_1.0.1f-1ubuntu2.4_amd64.deb ...
+    Unpacking openssl (1.0.1f-1ubuntu2.4) ...
+    Selecting previously unselected package ca-certificates.
+    Preparing to unpack .../ca-certificates_20130906ubuntu2_all.deb ...
+    Unpacking ca-certificates (20130906ubuntu2) ...
+    Selecting previously unselected package manpages.
+    Preparing to unpack .../manpages_3.54-1ubuntu1_all.deb ...
+    Unpacking manpages (3.54-1ubuntu1) ...
+    Selecting previously unselected package binutils.
+    Preparing to unpack .../binutils_2.24-5ubuntu3_amd64.deb ...
+    Unpacking binutils (2.24-5ubuntu3) ...
+    Selecting previously unselected package cpp-4.8.
+    Preparing to unpack .../cpp-4.8_4.8.2-19ubuntu1_amd64.deb ...
+    Unpacking cpp-4.8 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package cpp.
+    Preparing to unpack .../cpp_4%3a4.8.2-1ubuntu6_amd64.deb ...
+    Unpacking cpp (4:4.8.2-1ubuntu6) ...
+    Selecting previously unselected package libgcc-4.8-dev:amd64.
+    Preparing to unpack .../libgcc-4.8-dev_4.8.2-19ubuntu1_amd64.deb ...
+    Unpacking libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package gcc-4.8.
+    Preparing to unpack .../gcc-4.8_4.8.2-19ubuntu1_amd64.deb ...
+    Unpacking gcc-4.8 (4.8.2-19ubuntu1) ...
+    Selecting previously unselected package gcc.
+    Preparing to unpack .../gcc_4%3a4.8.2-1ubuntu6_amd64.deb ...
+    Unpacking gcc (4:4.8.2-1ubuntu6) ...
+    Selecting previously unselected package libc-dev-bin.
+    Preparing to unpack .../libc-dev-bin_2.19-0ubuntu6_amd64.deb ...
+    Unpacking libc-dev-bin (2.19-0ubuntu6) ...
+    Selecting previously unselected package linux-libc-dev:amd64.
+    Preparing to unpack .../linux-libc-dev_3.13.0-30.55_amd64.deb ...
+    Unpacking linux-libc-dev:amd64 (3.13.0-30.55) ...
+    Selecting previously unselected package libc6-dev:amd64.
+    Preparing to unpack .../libc6-dev_2.19-0ubuntu6_amd64.deb ...
+    Unpacking libc6-dev:amd64 (2.19-0ubuntu6) ...
+    Selecting previously unselected package ruby.
+    Preparing to unpack .../ruby_1%3a1.9.3.4_all.deb ...
+    Unpacking ruby (1:1.9.3.4) ...
+    Selecting previously unselected package ruby1.9.1.
+    Preparing to unpack .../ruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ...
+    Unpacking ruby1.9.1 (1.9.3.484-2ubuntu1) ...
+    Selecting previously unselected package libruby1.9.1.
+    Preparing to unpack .../libruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ...
+    Unpacking libruby1.9.1 (1.9.3.484-2ubuntu1) ...
+    Selecting previously unselected package manpages-dev.
+    Preparing to unpack .../manpages-dev_3.54-1ubuntu1_all.deb ...
+    Unpacking manpages-dev (3.54-1ubuntu1) ...
+    Selecting previously unselected package ruby1.9.1-dev.
+    Preparing to unpack .../ruby1.9.1-dev_1.9.3.484-2ubuntu1_amd64.deb ...
+    Unpacking ruby1.9.1-dev (1.9.3.484-2ubuntu1) ...
+    Selecting previously unselected package ruby-dev.
+    Preparing to unpack .../ruby-dev_1%3a1.9.3.4_all.deb ...
+    Unpacking ruby-dev (1:1.9.3.4) ...
+    Setting up libasan0:amd64 (4.8.2-19ubuntu1) ...
+    Setting up libatomic1:amd64 (4.8.2-19ubuntu1) ...
+    Setting up libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ...
+    Setting up libisl10:amd64 (0.12.2-1) ...
+    Setting up libcloog-isl4:amd64 (0.18.2-1) ...
+    Setting up libgomp1:amd64 (4.8.2-19ubuntu1) ...
+    Setting up libitm1:amd64 (4.8.2-19ubuntu1) ...
+    Setting up libmpfr4:amd64 (3.1.2-1) ...
+    Setting up libquadmath0:amd64 (4.8.2-19ubuntu1) ...
+    Setting up libtsan0:amd64 (4.8.2-19ubuntu1) ...
+    Setting up libyaml-0-2:amd64 (0.1.4-3ubuntu3) ...
+    Setting up libmpc3:amd64 (1.0.1-1ubuntu1) ...
+    Setting up openssl (1.0.1f-1ubuntu2.4) ...
+    Setting up ca-certificates (20130906ubuntu2) ...
+    debconf: unable to initialize frontend: Dialog
+    debconf: (TERM is not set, so the dialog frontend is not usable.)
+    debconf: falling back to frontend: Readline
+    debconf: unable to initialize frontend: Readline
+    debconf: (This frontend requires a controlling tty.)
+    debconf: falling back to frontend: Teletype
+    Setting up manpages (3.54-1ubuntu1) ...
+    Setting up binutils (2.24-5ubuntu3) ...
+    Setting up cpp-4.8 (4.8.2-19ubuntu1) ...
+    Setting up cpp (4:4.8.2-1ubuntu6) ...
+    Setting up libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ...
+    Setting up gcc-4.8 (4.8.2-19ubuntu1) ...
+    Setting up gcc (4:4.8.2-1ubuntu6) ...
+    Setting up libc-dev-bin (2.19-0ubuntu6) ...
+    Setting up linux-libc-dev:amd64 (3.13.0-30.55) ...
+    Setting up libc6-dev:amd64 (2.19-0ubuntu6) ...
+    Setting up manpages-dev (3.54-1ubuntu1) ...
+    Setting up libruby1.9.1 (1.9.3.484-2ubuntu1) ...
+    Setting up ruby1.9.1-dev (1.9.3.484-2ubuntu1) ...
+    Setting up ruby-dev (1:1.9.3.4) ...
     Setting up ruby (1:1.9.3.4) ...
     Setting up ruby1.9.1 (1.9.3.484-2ubuntu1) ...
     Processing triggers for libc-bin (2.19-0ubuntu6) ...
-     ---> 2acb20f17878
-    Removing intermediate container a5b038dd127e
-    Step 4 : RUN gem install sinatra
-     ---> Running in 5e9d0065c1f7
-    . . .
+    Processing triggers for ca-certificates (20130906ubuntu2) ...
+    Updating certificates in /etc/ssl/certs... 164 added, 0 removed; done.
+    Running hooks in /etc/ca-certificates/update.d....done.
+     ---> c55c31703134
+    Removing intermediate container 3a2558904e9b
+    Step 3 : RUN gem install sinatra
+     ---> Running in 6b81cb6313e5
+    unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping
+    unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping
+    Successfully installed rack-1.5.2
+    Successfully installed tilt-1.4.1
     Successfully installed rack-protection-1.5.3
     Successfully installed sinatra-1.4.5
     4 gems installed
-     ---> 324104cde6ad
-    Removing intermediate container 5e9d0065c1f7
-    Successfully built 324104cde6ad
+    Installing ri documentation for rack-1.5.2...
+    Installing ri documentation for tilt-1.4.1...
+    Installing ri documentation for rack-protection-1.5.3...
+    Installing ri documentation for sinatra-1.4.5...
+    Installing RDoc documentation for rack-1.5.2...
+    Installing RDoc documentation for tilt-1.4.1...
+    Installing RDoc documentation for rack-protection-1.5.3...
+    Installing RDoc documentation for sinatra-1.4.5...
+     ---> 97feabe5d2ed
+    Removing intermediate container 6b81cb6313e5
+    Successfully built 97feabe5d2ed
 
 We've specified our `docker build` command and used the `-t` flag to identify
 our new image as belonging to the user `ouruser`, the repository name `sinatra`
@@ -328,6 +457,11 @@
 (also helpfully tagged as `ouruser/sinatra:v2`) and all intermediate
 containers will get removed to clean things up.
 
+> **Note:** 
+> An image can't have more than 127 layers regardless of the storage driver.
+> This limitation is set globally to encourage optimization of the overall 
+> size of images.
+
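+One way to stay well under that limit is to chain related commands into a single
+`RUN` instruction, as the `Dockerfile` above does for `apt-get`. For example, this
+sketch produces one layer instead of three:
+
+    RUN apt-get update && \
+        apt-get install -y ruby ruby-dev && \
+        rm -rf /var/lib/apt/lists/*
+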
 We can then create a container from our new image.
 
     $ sudo docker run -t -i ouruser/sinatra:v2 /bin/bash
diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md
index 20a5c1a..3624bf7 100644
--- a/docs/sources/userguide/dockerlinks.md
+++ b/docs/sources/userguide/dockerlinks.md
@@ -4,48 +4,47 @@
 
 # Linking Containers Together
 
-In [the Using Docker section](/userguide/usingdocker) we touched on
-connecting to a service running inside a Docker container via a network
-port. This is one of the ways that you can interact with services and
-applications running inside Docker containers. In this section we're
-going to give you a refresher on connecting to a Docker container via a
-network port as well as introduce you to the concepts of container
-linking.
+In [the Using Docker section](/userguide/usingdocker), you saw how you can
+connect to a service running inside a Docker container via a network
+port. But a port connection is only one way you can interact with services and
+applications running inside Docker containers. In this section, we'll briefly revisit
+connecting via a network port and then we'll introduce you to another method of access:
+container linking.
 
 ## Network port mapping refresher
 
-In [the Using Docker section](/userguide/usingdocker) we created a
-container that ran a Python Flask application.
+In [the Using Docker section](/userguide/usingdocker), you created a
+container that ran a Python Flask application:
 
     $ sudo docker run -d -P training/webapp python app.py
 
 > **Note:** 
 > Containers have an internal network and an IP address
-> (remember we used the `docker inspect` command to show the container's
+> (as we saw when we used the `docker inspect` command to show the container's
 > IP address in the [Using Docker](/userguide/usingdocker/) section).
 > Docker can have a variety of network configurations. You can see more
 > information on Docker networking [here](/articles/networking/).
 
-When we created that container we used the `-P` flag to automatically map any
-network ports inside that container to a random high port from the range 49000
-to 49900 on our Docker host.  When we subsequently ran `docker ps` we saw that
-port 5000 was bound to port 49155.
+When that container was created, the `-P` flag was used to automatically map any
+network ports inside it to a random high port from the range 49000
+to 49900 on our Docker host.  Next, when `docker ps` was run, you saw that
+port 5000 in the container was bound to port 49155 on the host.
 
     $ sudo docker ps nostalgic_morse
     CONTAINER ID  IMAGE                   COMMAND       CREATED        STATUS        PORTS                    NAMES
     bc533791f3f5  training/webapp:latest  python app.py 5 seconds ago  Up 2 seconds  0.0.0.0:49155->5000/tcp  nostalgic_morse
 
-We also saw how we can bind a container's ports to a specific port using
-the `-p` flag.
+You also saw how you can bind a container's ports to a specific port using
+the `-p` flag:
 
     $ sudo docker run -d -p 5000:5000 training/webapp python app.py
 
-And we saw why this isn't such a great idea because it constrains us to
+And you saw why this isn't such a great idea because it constrains you to
 only one container on that specific port.
 
-There are also a few other ways we can configure the `-p` flag. By
+There are also a few other ways you can configure the `-p` flag. By
 default the `-p` flag will bind the specified port to all interfaces on
-the host machine. But we can also specify a binding to a specific
+the host machine. But you can also specify a binding to a specific
 interface, for example only to the `localhost`.
 
     $ sudo docker run -d -p 127.0.0.1:5000:5000 training/webapp python app.py
@@ -53,20 +52,19 @@
 This would bind port 5000 inside the container to port 5000 on the
 `localhost` or `127.0.0.1` interface on the host machine.
 
-Or to bind port 5000 of the container to a dynamic port but only on the
-`localhost` we could:
+Or, to bind port 5000 of the container to a dynamic port but only on the
+`localhost`, you could use:
 
     $ sudo docker run -d -p 127.0.0.1::5000 training/webapp python app.py
 
-We can also bind UDP ports by adding a trailing `/udp`, for example:
+You can also bind UDP ports by adding a trailing `/udp`. For example:
 
     $ sudo docker run -d -p 127.0.0.1:5000:5000/udp training/webapp python app.py
 
-We also saw the useful `docker port` shortcut which showed us the
-current port bindings, this is also useful for showing us specific port
-configurations. For example if we've bound the container port to the
-`localhost` on the host machine this will be shown in the `docker port`
-output.
+You also learned about the useful `docker port` shortcut, which showed you the
+current port bindings. This is also useful for showing you specific port
+configurations. For example, if you've bound the container port to the
+`localhost` on the host machine, then the `docker port` output will reflect that.
 
     $ docker port nostalgic_morse 5000
     127.0.0.1:49155
@@ -78,101 +76,110 @@
 
 Network port mappings are not the only way Docker containers can connect
 to one another. Docker also has a linking system that allows you to link
-multiple containers together and share connection information between
-them. Docker linking will create a parent child relationship where the
-parent container can see selected information about its child.
+multiple containers together and send connection information from one to another.
+When containers are linked, information about a source container can be sent to a
+recipient container. This allows the recipient to see selected data describing
+aspects of the source container.
 
 ## Container naming
 
-To perform this linking Docker relies on the names of your containers.
-We've already seen that each container we create has an automatically
-created name, indeed we've become familiar with our old friend
+To establish links, Docker relies on the names of your containers.
+You've already seen that each container you create has an automatically
+created name; indeed you've become familiar with our old friend
 `nostalgic_morse` during this guide. You can also name containers
 yourself. This naming provides two useful functions:
 
-1. It's useful to name containers that do specific functions in a way
+1. It can be useful to name containers that do specific functions in a way
    that makes it easier for you to remember them, for example naming a
-   container with a web application in it `web`.
+   container containing a web application `web`.
 
 2. It provides Docker with a reference point that allows it to refer to other
-   containers, for example link container `web` to container `db`.
+   containers; for example, you can link the container `web` to the container `db`.
 
 You can name your container by using the `--name` flag, for example:
 
     $ sudo docker run -d -P --name web training/webapp python app.py
 
-You can see we've launched a new container and used the `--name` flag to
-call the container `web`. We can see the container's name using the
+This launches a new container and uses the `--name` flag to
+name the container `web`. You can see the container's name using the
 `docker ps` command.
 
     $ sudo docker ps -l
     CONTAINER ID  IMAGE                  COMMAND        CREATED       STATUS       PORTS                    NAMES
     aed84ee21bde  training/webapp:latest python app.py  12 hours ago  Up 2 seconds 0.0.0.0:49154->5000/tcp  web
 
-We can also use `docker inspect` to return the container's name.
+You can also use `docker inspect` to return the container's name.
 
     $ sudo docker inspect -f "{{ .Name }}" aed84ee21bde
     /web
 
 > **Note:** 
 > Container names have to be unique. That means you can only call
-> one container `web`. If you want to re-use a container name you must delete the
-> old container with the `docker rm` command before you can create a new
+> one container `web`. If you want to re-use a container name you must delete
+> the old container (with `docker rm`) before you can create a new
 > container with the same name. As an alternative you can use the `--rm`
 > flag with the `docker run` command. This will delete the container
-> immediately after it stops.
+> immediately after it is stopped.
 
 ## Container Linking
 
-Links allow containers to discover and securely communicate with each
-other. To create a link you use the `--link` flag. Let's create a new
-container, this one a database.
+Links allow containers to discover each other and securely transfer information about one
+container to another container. When you set up a link, you create a conduit between a
+source container and a recipient container. The recipient can then access select data
+about the source. To create a link, you use the `--link` flag. First, create a new
+container, this time one containing a database.
 
     $ sudo docker run -d --name db training/postgres
 
-Here we've created a new container called `db` using the `training/postgres`
+This creates a new container called `db` from the `training/postgres`
 image, which contains a PostgreSQL database.
 
-Now let's create a new `web` container and link it with our `db` container.
+Now, you need to delete the `web` container you created previously so you can replace it
+with a linked one:
+
+    $ docker rm -f web
+
+Now, create a new `web` container and link it with your `db` container.
 
     $ sudo docker run -d -P --name web --link db:db training/webapp python app.py
 
-This will link the new `web` container with the `db` container we created
+This will link the new `web` container with the `db` container you created
 earlier. The `--link` flag takes the form:
 
     --link name:alias
 
 Where `name` is the name of the container we're linking to and `alias` is an
-alias for the link name. We'll see how that alias gets used shortly.
+alias for the link name. You'll see how that alias gets used shortly.
 
-Let's look at our linked containers using `docker ps`.
+Next, look at your linked containers using `docker ps`.
 
     $ docker ps
     CONTAINER ID  IMAGE                     COMMAND               CREATED             STATUS             PORTS                    NAMES
-    349169744e49  training/postgres:latest  su postgres -c '/usr  About a minute ago  Up About a minute  5432/tcp                 db
-    aed84ee21bde  training/webapp:latest    python app.py         16 hours ago        Up 2 minutes       0.0.0.0:49154->5000/tcp  db/web,web
+    349169744e49  training/postgres:latest  su postgres -c '/usr  About a minute ago  Up About a minute  5432/tcp                 db, web/db
+    aed84ee21bde  training/webapp:latest    python app.py         16 hours ago        Up 2 minutes       0.0.0.0:49154->5000/tcp  web
 
-We can see our named containers, `db` and `web`, and we can see that the `web`
-containers also shows `db/web` in the `NAMES` column. This tells us that the
-`web` container is linked to the `db` container in a parent/child relationship.
+You can see your named containers, `db` and `web`, and you can see that the `db`
+container also shows `web/db` in the `NAMES` column. This tells you that the
+`web` container is linked to the `db` container, which allows it to access information
+about the `db` container.
 
-So what does linking the containers do? Well we've discovered the link creates
-a parent-child relationship between the two containers. The parent container,
-here `db`, can access information on the child container `web`. To do this
-Docker creates a secure tunnel between the containers without the need to
-expose any ports externally on the container. You'll note when we started the
-`db` container we did not use either of the `-P` or `-p` flags. As we're
-linking the containers we don't need to expose the PostgreSQL database via the
-network.
+So what does linking the containers actually do? You've learned that a link allows a
+source container to provide information about itself to a recipient container. In
+our example, the recipient, `web`, can access information about the source `db`. To do
+this, Docker creates a secure tunnel between the containers that doesn't need to
+expose any ports externally on the container; you'll note when we started the
+`db` container we did not use either the `-P` or `-p` flags. That's a big benefit of
+linking: we don't need to expose the source container, here the PostgreSQL database, to
+the network.
 
-Docker exposes connectivity information for the parent container inside the
-child container in two ways:
+Docker exposes connectivity information for the source container to the
+recipient container in two ways:
 
 * Environment variables,
 * Updating the `/etc/hosts` file.
 
-Let's look first at the environment variables Docker sets. Let's run the `env`
-command to list the container's environment variables.
+Docker sets a number of environment variables when containers are linked. Run the
+`env` command in a linked container to list them:
 
 ```
     $ sudo docker run --rm --name web2 --link db:db training/webapp env
@@ -191,26 +198,27 @@
 > container. Similarly, some daemons (such as `sshd`)
 > will scrub them when spawning shells for connection.
 
-We can see that Docker has created a series of environment variables with
-useful information about our `db` container. Each variable is prefixed with
-`DB_` which is populated from the `alias` we specified above. If our `alias`
-were `db1` the variables would be prefixed with `DB1_`. You can use these
+You can see that Docker has created a series of environment variables with
+useful information about the source `db` container. Each variable is prefixed with
+`DB_`, which is populated from the `alias` you specified above. If the `alias`
+were `db1`, the variables would be prefixed with `DB1_`. You can use these
 environment variables to configure your applications to connect to the database
-on the `db` container. The connection will be secure, private and only the
+on the `db` container. The connection will be secure and private; only the
 linked `web` container will be able to talk to the `db` container.
 
-In addition to the environment variables Docker adds a host entry for the
-linked parent to the `/etc/hosts` file. Let's look at this file on the `web`
-container now.
+In addition to the environment variables, Docker adds a host entry for the
+source container to the `/etc/hosts` file. Here's an entry for the `web`
+container:
 
+    $ sudo docker run -t -i --rm --link db:db training/webapp /bin/bash
     root@aed84ee21bde:/opt/webapp# cat /etc/hosts
     172.17.0.7  aed84ee21bde
     . . .
     172.17.0.5  db
 
-We can see two relevant host entries. The first is an entry for the `web`
+You can see two relevant host entries. The first is an entry for the `web`
 container that uses the Container ID as a host name. The second entry uses the
-link alias to reference the IP address of the `db` container. Let's try to ping
+link alias to reference the IP address of the `db` container. You can ping
 that host now via this host name.
 
     root@aed84ee21bde:/opt/webapp# apt-get install -yqq inetutils-ping
@@ -221,21 +229,22 @@
     56 bytes from 172.17.0.5: icmp_seq=2 ttl=64 time=0.256 ms
 
 > **Note:** 
-> We had to install `ping` because our container didn't have it.
+> In the example, you'll note you had to install `ping` because it was not included
+> in the container initially.
 
-We've used the `ping` command to ping the `db` container using it's host entry
-which resolves to `172.17.0.5`. We can make use of this host entry to configure
-an application to make use of our `db` container.
+Here, you used the `ping` command to ping the `db` container using its host entry,
+which resolves to `172.17.0.5`. You can use this host entry to configure an application
+to make use of your `db` container.
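+
+For example, an application's database connection string could simply reference
+the `db` host entry (an illustrative sketch; the user, password, and database
+name are hypothetical):
+
+    postgresql://docker:secret@db:5432/webapp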
 
 > **Note:** 
-> You can link multiple child containers to a single parent. For
-> example, we could have multiple web containers attached to our `db`
-> container.
+> You can link multiple recipient containers to a single source. For
+> example, you could have multiple (differently named) web containers attached to your
+> `db` container.
 
 # Next step
 
-Now we know how to link Docker containers together the next step is
-learning how to manage data, volumes and mounts inside our containers.
+Now that you know how to link Docker containers together, the next step is
+learning how to manage data, volumes and mounts inside your containers.
 
 Go to [Managing Data in Containers](/userguide/dockervolumes).
 
diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md
index 93ac37b..97593a1 100644
--- a/docs/sources/userguide/dockervolumes.md
+++ b/docs/sources/userguide/dockervolumes.md
@@ -59,9 +59,10 @@
 create it for you.
 
 > **Note:** 
-> This is not available from a `Dockerfile` due the portability
+> This is not available from a `Dockerfile` due to the portability
 > and sharing purpose of it. As the host directory is, by its nature,
-> host-dependent it might not work all hosts.
+> host-dependent, a host directory specified in a `Dockerfile` probably
+> wouldn't work on all hosts.
 
 Docker defaults to a read-write volume but we can also mount a directory
 read-only.
@@ -71,6 +72,24 @@
 Here we've mounted the same `/src/webapp` directory but we've added the `ro`
 option to specify that the mount should be read-only.
 
+### Mount a Host File as a Data Volume
+
+The `-v` flag can also be used to mount a single file, instead of *just*
+directories, from the host machine.
+
+    $ sudo docker run --rm -it -v ~/.bash_history:/.bash_history ubuntu /bin/bash
+
+This will drop you into a bash shell in a new container. You will have your bash
+history from the host, and when you exit the container, the host will have the
+history of the commands typed while in the container.
+
+> **Note:** 
+> Many tools used to edit files including `vi` and `sed --in-place` may result 
+> in an inode change. Since Docker v1.1.0, this will produce an error such as
+> "*sed: cannot rename ./sedKdJ9Dy: Device or resource busy*". In the case where 
+> you want to edit the mounted file, it is often easiest to instead mount the 
+> parent directory.
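+
+A sketch of that workaround, mounting the parent directory rather than the file
+itself (the `~/configs` path and file name are hypothetical):
+
+    $ sudo docker run --rm -it -v ~/configs:/configs ubuntu sed -i "s/foo/bar/" /configs/app.conf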
+
 ## Creating and mounting a Data Volume Container
 
 If you have some persistent data that you want to share between
@@ -80,7 +99,7 @@
 
 Let's create a new named container with a volume to share.
 
-    $ sudo docker run -d -v /dbdata --name dbdata training/postgres
+    $ sudo docker run -d -v /dbdata --name dbdata training/postgres echo Data-only container for postgres
 
 You can then use the `--volumes-from` flag to mount the `/dbdata` volume in another container.
 
@@ -112,14 +131,14 @@
 
     $ sudo docker run --volumes-from dbdata -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata
 
-Here's we've launched a new container and mounted the volume from the
+Here we've launched a new container and mounted the volume from the
 `dbdata` container. We've then mounted a local host directory as
 `/backup`. Finally, we've passed a command that uses `tar` to backup the
 contents of the `dbdata` volume to a `backup.tar` file inside our
 `/backup` directory. When the command completes and the container stops
 we'll be left with a backup of our `dbdata` volume.
 
-You could then to restore to the same container, or another that you've made
+You could then restore it to the same container, or another that you've made
 elsewhere. Create a new container.
 
     $ sudo docker run -v /dbdata --name dbdata2 ubuntu /bin/bash
diff --git a/docs/sources/userguide/index.md b/docs/sources/userguide/index.md
index eef59c0..08d6be0 100644
--- a/docs/sources/userguide/index.md
+++ b/docs/sources/userguide/index.md
@@ -87,7 +87,7 @@
 * [Docker blog](http://blog.docker.com/)
 * [Docker documentation](http://docs.docker.com/)
 * [Docker Getting Started Guide](http://www.docker.com/gettingstarted/)
-* [Docker code on GitHub](https://github.com/dotcloud/docker)
+* [Docker code on GitHub](https://github.com/docker/docker)
 * [Docker mailing
   list](https://groups.google.com/forum/#!forum/docker-user)
 * Docker on IRC: irc.freenode.net and channel #docker
diff --git a/docs/sources/userguide/login-web.png b/docs/sources/userguide/login-web.png
index 8fe04d8..e9d26b5 100644
--- a/docs/sources/userguide/login-web.png
+++ b/docs/sources/userguide/login-web.png
Binary files differ
diff --git a/docs/sources/userguide/register-confirm.png b/docs/sources/userguide/register-confirm.png
deleted file mode 100644
index 4057cbe..0000000
--- a/docs/sources/userguide/register-confirm.png
+++ /dev/null
Binary files differ
diff --git a/docs/sources/userguide/register-web.png b/docs/sources/userguide/register-web.png
index 2c950d2..6c549f8 100644
--- a/docs/sources/userguide/register-web.png
+++ b/docs/sources/userguide/register-web.png
Binary files differ
diff --git a/docs/sources/userguide/search.png b/docs/sources/userguide/search.png
index 2737074..ded0d0d 100644
--- a/docs/sources/userguide/search.png
+++ b/docs/sources/userguide/search.png
Binary files differ
diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md
index 857eac5..a882a79 100644
--- a/docs/sources/userguide/usingdocker.md
+++ b/docs/sources/userguide/usingdocker.md
@@ -76,7 +76,7 @@
 
 Or you can also pass the `--help` flag to the `docker` binary.
 
-    $ sudo docker images --help
+    $ sudo docker attach --help
 
 This will display the help text and all available flags:
 
diff --git a/docs/theme/mkdocs/base.html b/docs/theme/mkdocs/base.html
index 8f2bd06..2f518b5 100644
--- a/docs/theme/mkdocs/base.html
+++ b/docs/theme/mkdocs/base.html
@@ -4,10 +4,11 @@
   <meta charset="utf-8">
   <meta http-equiv="X-UA-Compatible" content="IE=edge">
   <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
-{% set docker_version = "$VERSION" %}{% set docker_commit = "$GITCOMMIT" %}{% set docker_branch = "$GIT_BRANCH" %}{% set aws_bucket = "$AWS_S3_BUCKET" %}
+  {% set docker_version = "$VERSION" %}{% set major_minor = "$MAJOR_MINOR" %}{% set docker_commit = "$GITCOMMIT" %}{% set docker_branch = "$GIT_BRANCH" %}{% set aws_bucket = "$AWS_S3_BUCKET" %}{% set build_date = "$BUILD_DATE" %}
   <meta name="docker_version" content="{{ docker_version }}">
   <meta name="docker_git_branch" content="{{ docker_branch }}">
   <meta name="docker_git_commit" content="{{ docker_commit }}">
+  <meta name="docker_build_date" content="{{ build_date }}">
 
   {% if meta.page_description %}<meta name="description" content="{{ meta.page_description[0] }}">{% endif %}
   {% if meta.page_keywords %}<meta name="keywords" content="{{ meta.page_keywords[0] }}">{% endif %}
@@ -27,15 +28,15 @@
   <![endif]-->
   {% if config.google_analytics %}
   <script type="text/javascript">
-  var _gaq = _gaq || [];
-  _gaq.push(['_setAccount', '{{ config.google_analytics[0] }}']);
-  _gaq.push(['_trackPageview']);
-  _gaq.push(['_trackPageLoadTime']);
-  (function() {
-    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-  })();
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', '{{ config.google_analytics[0] }}', 'docker.com');
+  ga('require', 'linkid', 'linkid.js');
+  ga('send', 'pageview', {
+   'page': location.pathname + location.search  + location.hash,
+  });
   </script>
   {% endif %}
 </head>
@@ -48,11 +49,23 @@
   <div id="content" class="container">
 {% if current_page.title != '**HIDDEN**' %}
     <div class="row" id="top-header">
-      <div class="span8">
+      <div class="span7">
         <h1 class="header2">{{ current_page.title }}</h1>
       </div>
-      <div class="span4 text-right edit-on-github">
-        <a class="home-link3" href="https://github.com/dotcloud/docker/blob/master/docs/sources/{{ current_page.input_path }}" class="tertiary-nav">Edit on GitHub</a>
+      <div class="span5">
+        <div id="versionnav" class="span3 pull-right">
+          <ul class="nav version pull-right">
+            <li class="dropdown">
+              <a id="logged-in-header-username" class="dropdown-toggle" data-toggle="dropdown" href="#">
+		      Latest (Version {{ major_minor }})
+              </a>
+              <ul id="documentation-version-list" class="dropdown-menu pull-right">
+		<li role="presentation" class="divider"></li>
+		<li> <a class="home-link3 tertiary-nav" href="https://github.com/docker/docker/blob/master/docs/sources/{{ current_page.input_path }}" >Edit on GitHub</a></li>
+              </ul>
+            </li>
+          </ul>
+        </div>
       </div>
     </div>
 {% endif %}
@@ -119,19 +132,18 @@
 })();
 </script>
 <script type="text/javascript">
-  // Function to make the sticky header possible
-  var shiftWindow = function() {
-    scrollBy(0, -80);
-  };
-
-  window.addEventListener("hashchange", shiftWindow);
-  $(window).load(function() {
-    if (window.location.hash) {
-      shiftWindow();
-    }
-  });
   $(document).ready(function() {
     $('#content').css("min-height", $(window).height() - 553 );
+    // load the complete versions list
+    $.get("/versions.html_fragment", function( data ) {
+    	$('#documentation-version-list').prepend(data);
+	// remove any "/v1.1/" bits from the front of the path.
+	path = document.location.pathname.replace(/^\/v\d\.\d/, "");
+	$('#documentation-version-list a.version').each(function(i, e) {
+		e.href = e.href+path;
+		$(e).removeClass()
+	});
+    });
   })
   var userName = getCookie('docker_sso_username');
   if (userName) {
diff --git a/docs/theme/mkdocs/breadcrumbs.html b/docs/theme/mkdocs/breadcrumbs.html
index 3dc2dbb..c99e10f 100644
--- a/docs/theme/mkdocs/breadcrumbs.html
+++ b/docs/theme/mkdocs/breadcrumbs.html
@@ -8,5 +8,5 @@
       {% endif %}
     {% endif %}
   {% endfor %}
-  <li class="pull-right edit-on-github"><a href="https://github.com/dotcloud/docker/blob/master/docs/sources/{{ current_page.input_path }}"><span class="glyphicon glyphicon-edit"></span>Edit on GitHub</a></li>
+  <li class="pull-right edit-on-github"><a href="https://github.com/docker/docker/blob/master/docs/sources/{{ current_page.input_path }}"><span class="glyphicon glyphicon-edit"></span>Edit on GitHub</a></li>
 </ol>
\ No newline at end of file
diff --git a/docs/theme/mkdocs/css/docs.css b/docs/theme/mkdocs/css/docs.css
index 0f42e22..bd9711c 100644
--- a/docs/theme/mkdocs/css/docs.css
+++ b/docs/theme/mkdocs/css/docs.css
@@ -24,6 +24,18 @@
   height: 100%;
 }
 
+#leftnav h3 {
+  font-size: 10px;
+  font-weight: 700;
+  color: #394d54;
+  line-height: 1;
+  margin: 0px 0 10px 0;
+  padding-left: 20px;
+  white-space: nowrap;
+  overflow: hidden;
+  text-overflow: ellipsis;
+}
+
 .content-body {
   padding: 0px 0px 0px 20px;
 }
@@ -56,12 +68,9 @@
 #nav_menu > #docsnav > #main-nav > li > a {
   color: #253237;
 }
-#nav_menu > #docsnav > #main-nav > li.dd_on_hover > a {
-  color: #5992a3;
-}
 #nav_menu > #docsnav > #main-nav > li.dd_on_hover {
-  background: #b1d5df;
-  color: #5992a3;
+  background: #d3f1fb;
+  color: #253237;
 }
 #nav_menu > #docsnav > #main-nav > li > span > b {
   border-top-color: #b1d5df !important;
diff --git a/docs/theme/mkdocs/css/main.css b/docs/theme/mkdocs/css/main.css
index 18e65eb..3375f79 100644
--- a/docs/theme/mkdocs/css/main.css
+++ b/docs/theme/mkdocs/css/main.css
@@ -847,7 +847,8 @@
 .navbar #usernav .nav li {
   padding-top: 15px;
 }
-.navbar #usernav .nav li a {
+.navbar #usernav .nav li a,
+#versionnav .nav li a.dropdown-toggle {
   font-size: 14px;
   font-weight: 400;
   color: #394d54;
@@ -856,10 +857,12 @@
   padding: 0 20px 0 0;
   background: url("../img/nav/caret-down-user-icon.svg") no-repeat 100% 50%;
 }
-.navbar #usernav .nav li a:hover {
+.navbar #usernav .nav li a:hover,
+#versionnav .nav li a.dropdown-toggle:hover {
   background-image: url("../img/nav/caret-down-user-icon-over.svg");
 }
-.navbar #usernav .nav li ul li {
+.navbar #usernav .nav li ul li,
+#versionnav .version {
   padding: 0;
   margin: 0;
   height: 28px;
@@ -902,7 +905,7 @@
   margin-bottom: 22px;
 }
 #leftnav .nav {
-  margin: 0;
+  margin: 0 0 20px 0;
 }
 #leftnav .nav > li > a {
   line-height: 22px;
diff --git a/docs/theme/mkdocs/footer.html b/docs/theme/mkdocs/footer.html
index 1e2d73c..05316b4 100644
--- a/docs/theme/mkdocs/footer.html
+++ b/docs/theme/mkdocs/footer.html
@@ -1,49 +1,62 @@
 <div id="footer-container" class="container">
   <div id="footer" class="grey-body">
     <div class="row">
-      <div class="span3">
+      <div class="span2">
         <span class="footer-title">Community</span>
         <ul class="unstyled">
-          <li><a class="primary-button" href="http://www.docker.com/community/events/">Events</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/community/events/">Events</a></li>
           <li><a class="primary-button" href="http://posts.docker.com">Friends' Posts</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/community/meetups/">Meetups</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/community/governance/">Governance</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/community/meetups/">Meetups</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/community/governance/">Governance</a></li>
           <li><a class="primary-button" href="http://forums.docker.com">Forums</a></li>
           <li><a class="primary-button" href="http://botbot.me/freenode/docker">IRC</a></li>
-          <li><a class="primary-button" href="https://github.com/dotcloud/docker">GitHub</a></li>
+          <li><a class="primary-button" href="https://github.com/docker/docker">GitHub</a></li>
           <li><a class="primary-button" href="http://stackoverflow.com/search?q=docker">Stackoverflow</a></li>
           <li><a class="primary-button" href="http://www.cafepress.com/docker">Swag</a></li>
         </ul>
       </div>
-      <div class="span3">
+      <div class="span2">
+        <span class="footer-title">Enterprise</span>
+        <ul class="unstyled">
+          <li><a class="primary-button" href="https://www.docker.com/enterprise/support/">Support</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/enterprise/education/">Education</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/enterprise/services/">Services</a></li>
+        </ul>
+        <span class="footer-title">Partner Solutions</span>
+        <ul class="unstyled">
+          <li><a class="primary-button" href="https://www.docker.com/partners/find/">Find a Partner</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/partners/program/">Partner Program</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/partners/learn/">Learn More</a></li>
+        </ul>
+      </div>
+      <div class="span2">
         <span class="footer-title">Resources</span>
         <ul class="unstyled">
           <li><a class="primary-button" href="https://docs.docker.com">Documentation</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/resources/help/">Help</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/resources/education/">Education</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/resources/partners/">Partners</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/resources/services/">Services</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/resources/howtobuy/">How To Buy</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/resources/help/">Help</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/resources/usecases/">Use Cases</a></li>
+          <li><a class="primary-button" href="http://www.docker.com/tryit/">Online Tutorial</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/resources/howtobuy/">How To Buy</a></li>
           <li><a class="primary-button" href="http://status.docker.com">Status</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/resources/security/">Security</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/resources/security/">Security</a></li>
         </ul>
       </div>
-      <div class="span3">
+      <div class="span2">
         <span class="footer-title">Company</span>
         <ul class="unstyled">
-          <li><a class="primary-button" href="http://www.docker.com/company/aboutus/">About Us</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/company/team/">Team</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/company/news/">News</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/company/press/">Press</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/company/careers/">Careers</a></li>
-          <li><a class="primary-button" href="http://www.docker.com/company/contact/">Contact</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/company/aboutus/">About Us</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/company/team/">Team</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/company/news/">News</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/company/press/">Press</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/company/careers/">Careers</a></li>
+          <li><a class="primary-button" href="https://www.docker.com/company/contact/">Contact</a></li>
         </ul>
       </div>
       <div class="span3">
         <span class="footer-title">Connect</span>
         <div class="search">
           <span>Subscribe to our newsletter</span>
-          <form action="http://www.docker.com/subscribe_newsletter/" method="post">
+          <form action="https://www.docker.com/subscribe_newsletter/" method="post">
             <input type='hidden' name='csrfmiddlewaretoken' value='aWL78QXQkY8DSKNYh6cl08p5eTLl7sOa' />
             <tr><th><label for="id_email">Email:</label></th><td><input class="form-control" id="id_email" name="email" placeholder="Enter your email" type="text" /></td></tr>
             
diff --git a/docs/theme/mkdocs/header.html b/docs/theme/mkdocs/header.html
index 3560929..a3b1d9b 100644
--- a/docs/theme/mkdocs/header.html
+++ b/docs/theme/mkdocs/header.html
@@ -24,7 +24,7 @@
     </form>
     <ul class="nav">
       <li><a href="https://registry.hub.docker.com" title="Browse Repos">Browse Repos</a></li>
-      <li><a href="http://docs.docker.com" title="Documentation">Documentation</a></li>
+      <li class="active"><a href="http://docs.docker.com" title="Documentation">Documentation</a></li>
       <li><a href="http://www.docker.com/community/participate/" title="Community">Community</a></li>
       <li><a href="http://www.docker.com/resources/help/" title="Help">Help</a></li>
     </ul>
diff --git a/docs/theme/mkdocs/toc.html b/docs/theme/mkdocs/toc.html
index 96d15c2..1de2a42 100644
--- a/docs/theme/mkdocs/toc.html
+++ b/docs/theme/mkdocs/toc.html
@@ -1,5 +1,8 @@
-  {% for toc_item in toc %}
-    {% for toc_item in toc_item.children %}
-      <li class=""><a href="{{ toc_item.url }}">{{ toc_item.title }}</a></li>
+{% for toc_item in toc %}
+  {% for toc_h2_item in toc_item.children %}
+    <li class=""><a href="{{ toc_h2_item.url }}">{{ toc_h2_item.title }}</a></li>
+    {% for toc_h3_item in toc_h2_item.children %}
+      <h3><a href="{{ toc_h3_item.url }}">{{ toc_h3_item.title }}</a></h3>
+    {% endfor %}
   {% endfor %}
 {% endfor %}
diff --git a/engine/engine.go b/engine/engine.go
index 5c3228d..4550df9 100644
--- a/engine/engine.go
+++ b/engine/engine.go
@@ -7,8 +7,10 @@
 	"os"
 	"sort"
 	"strings"
+	"sync"
+	"time"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/utils"
 )
 
 // Installer is a standard interface for objects which can "install" themselves
@@ -43,14 +45,18 @@
 // It acts as a store for *containers*, and allows manipulation of these
 // containers by executing *jobs*.
 type Engine struct {
-	handlers map[string]Handler
-	catchall Handler
-	hack     Hack // data for temporary hackery (see hack.go)
-	id       string
-	Stdout   io.Writer
-	Stderr   io.Writer
-	Stdin    io.Reader
-	Logging  bool
+	handlers   map[string]Handler
+	catchall   Handler
+	hack       Hack // data for temporary hackery (see hack.go)
+	id         string
+	Stdout     io.Writer
+	Stderr     io.Writer
+	Stdin      io.Reader
+	Logging    bool
+	tasks      sync.WaitGroup
+	l          sync.RWMutex // lock for shutdown
+	shutdown   bool
+	onShutdown []func() // shutdown handlers
 }
 
 func (eng *Engine) Register(name string, handler Handler) error {
@@ -130,6 +136,77 @@
 	return job
 }
 
+// OnShutdown registers a new callback to be called by Shutdown.
+// This is typically used by services to perform cleanup.
+func (eng *Engine) OnShutdown(h func()) {
+	eng.l.Lock()
+	eng.onShutdown = append(eng.onShutdown, h)
+	eng.l.Unlock()
+}
+
+// Shutdown permanently shuts down eng as follows:
+// - It refuses all new jobs, permanently.
+// - It waits for all active jobs to complete (for up to 5 seconds)
+// - It calls all shutdown handlers concurrently (if any)
+// - It returns when all handlers complete, or after 15 seconds,
+//	whichever happens first.
+func (eng *Engine) Shutdown() {
+	eng.l.Lock()
+	if eng.shutdown {
+		eng.l.Unlock()
+		return
+	}
+	eng.shutdown = true
+	eng.l.Unlock()
+	// We don't need to protect the rest with a lock, to allow
+	// for other calls to immediately fail with "shutdown" instead
+	// of hanging for 15 seconds.
+	// This requires all concurrent calls to check for shutdown, otherwise
+	// it might cause a race.
+
+	// Wait for all jobs to complete.
+	// Timeout after 5 seconds.
+	tasksDone := make(chan struct{})
+	go func() {
+		eng.tasks.Wait()
+		close(tasksDone)
+	}()
+	select {
+	case <-time.After(time.Second * 5):
+	case <-tasksDone:
+	}
+
+	// Call shutdown handlers, if any.
+	// Timeout after 10 seconds.
+	var wg sync.WaitGroup
+	for _, h := range eng.onShutdown {
+		wg.Add(1)
+		go func(h func()) {
+			defer wg.Done()
+			h()
+		}(h)
+	}
+	done := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+	select {
+	case <-time.After(time.Second * 10):
+	case <-done:
+	}
+	return
+}
+
+// IsShutdown returns true if the engine is in the process
+// of shutting down, or already shut down.
+// Otherwise it returns false.
+func (eng *Engine) IsShutdown() bool {
+	eng.l.RLock()
+	defer eng.l.RUnlock()
+	return eng.shutdown
+}
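+
+// A typical lifecycle, as an illustrative sketch (not part of this package's
+// API surface):
+//
+//	eng := New()
+//	eng.OnShutdown(func() {
+//		// close sockets, flush state, etc.
+//	})
+//	// ... register handlers and run jobs ...
+//	eng.Shutdown() // waits for running jobs, then runs the handlers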
+
 // ParseJob creates a new job from a text description using a shell-like syntax.
 //
 // The following syntax is used to parse `input`:
diff --git a/engine/env_test.go b/engine/env_test.go
index f76d879..fe1db04 100644
--- a/engine/env_test.go
+++ b/engine/env_test.go
@@ -5,7 +5,7 @@
 	"encoding/json"
 	"testing"
 
-	"github.com/dotcloud/docker/pkg/testutils"
+	"github.com/docker/docker/pkg/testutils"
 )
 
 func TestEnvLenZero(t *testing.T) {
diff --git a/engine/job.go b/engine/job.go
index ab8120d..1c25212 100644
--- a/engine/job.go
+++ b/engine/job.go
@@ -32,7 +32,6 @@
 	handler Handler
 	status  Status
 	end     time.Time
-	onExit  []func()
 }
 
 type Status int
@@ -47,6 +46,20 @@
 // If the job returns a failure status, an error is returned
 // which includes the status.
 func (job *Job) Run() error {
+	if job.Eng.IsShutdown() {
+		return fmt.Errorf("engine is shutdown")
+	}
+	// FIXME: this is a temporary workaround to avoid Engine.Shutdown
+	// waiting 5 seconds for server/api.ServeApi to complete (which it never will)
+	// every time the daemon is cleanly restarted.
+	// The permanent fix is to implement Job.Stop and Job.OnStop so that
+	// ServeApi can cooperate and terminate cleanly.
+	if job.Name != "serveapi" {
+		job.Eng.l.Lock()
+		job.Eng.tasks.Add(1)
+		job.Eng.l.Unlock()
+		defer job.Eng.tasks.Done()
+	}
 	// FIXME: make this thread-safe
 	// FIXME: implement wait
 	if !job.end.IsZero() {
diff --git a/engine/shutdown_test.go b/engine/shutdown_test.go
new file mode 100644
index 0000000..13d8049
--- /dev/null
+++ b/engine/shutdown_test.go
@@ -0,0 +1,80 @@
+package engine
+
+import (
+	"testing"
+	"time"
+)
+
+func TestShutdownEmpty(t *testing.T) {
+	eng := New()
+	if eng.IsShutdown() {
+		t.Fatalf("IsShutdown should be false")
+	}
+	eng.Shutdown()
+	if !eng.IsShutdown() {
+		t.Fatalf("IsShutdown should be true")
+	}
+}
+
+func TestShutdownAfterRun(t *testing.T) {
+	eng := New()
+	var called bool
+	eng.Register("foo", func(job *Job) Status {
+		called = true
+		return StatusOK
+	})
+	if err := eng.Job("foo").Run(); err != nil {
+		t.Fatal(err)
+	}
+	eng.Shutdown()
+	if err := eng.Job("foo").Run(); err == nil {
+		t.Fatalf("%#v", *eng)
+	}
+}
+
+// An approximate and racy, but better-than-nothing test that Shutdown
+// blocks until a running job has completed.
+func TestShutdownDuringRun(t *testing.T) {
+	var (
+		jobDelay     time.Duration = 500 * time.Millisecond
+		jobDelayLow  time.Duration = 100 * time.Millisecond
+		jobDelayHigh time.Duration = 700 * time.Millisecond
+	)
+	eng := New()
+	var completed bool
+	eng.Register("foo", func(job *Job) Status {
+		time.Sleep(jobDelay)
+		completed = true
+		return StatusOK
+	})
+	go eng.Job("foo").Run()
+	time.Sleep(50 * time.Millisecond)
+	done := make(chan struct{})
+	var startShutdown time.Time
+	go func() {
+		startShutdown = time.Now()
+		eng.Shutdown()
+		close(done)
+	}()
+	time.Sleep(50 * time.Millisecond)
+	if err := eng.Job("foo").Run(); err == nil {
+		t.Fatalf("run on shutdown should fail: %#v", *eng)
+	}
+	<-done
+	// Verify that Shutdown() blocks for roughly 500ms, instead
+	// of returning almost instantly.
+	//
+	// We use >100ms to leave ample margin for race conditions between
+	// goroutines. It's possible (but unlikely in reasonable testing
+	// conditions), that this test will cause a false positive or false
+	// negative. But it's probably better than not having any test
+	// for the 99.999% of time where testing conditions are reasonable.
+	if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() {
+		t.Fatalf("shutdown did not block long enough: %v", d)
+	} else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() {
+		t.Fatalf("shutdown blocked too long: %v", d)
+	}
+	if !completed {
+		t.Fatalf("job did not complete")
+	}
+}
diff --git a/events/events.go b/events/events.go
new file mode 100644
index 0000000..57a82ca
--- /dev/null
+++ b/events/events.go
@@ -0,0 +1,176 @@
+package events
+
+import (
+	"encoding/json"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/utils"
+)
+
+const eventsLimit = 64
+
+type listener chan<- *utils.JSONMessage
+
+type Events struct {
+	mu          sync.RWMutex
+	events      []*utils.JSONMessage
+	subscribers []listener
+}
+
+func New() *Events {
+	return &Events{
+		events: make([]*utils.JSONMessage, 0, eventsLimit),
+	}
+}
+
+// Install installs the events public API in the docker engine.
+func (e *Events) Install(eng *engine.Engine) error {
+	// The jobs below make up the public interface of this component.
+	jobs := map[string]engine.Handler{
+		"events":            e.Get,
+		"log":               e.Log,
+		"subscribers_count": e.SubscribersCount,
+	}
+	for name, job := range jobs {
+		if err := eng.Register(name, job); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Events) Get(job *engine.Job) engine.Status {
+	var (
+		since   = job.GetenvInt64("since")
+		until   = job.GetenvInt64("until")
+		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
+	)
+
+	// If no until, disable timeout
+	if until == 0 {
+		timeout.Stop()
+	}
+
+	listener := make(chan *utils.JSONMessage)
+	e.subscribe(listener)
+	defer e.unsubscribe(listener)
+
+	job.Stdout.Write(nil)
+
+	// Resend every event in the [since, until] time interval.
+	if since != 0 {
+		if err := e.writeCurrent(job, since, until); err != nil {
+			return job.Error(err)
+		}
+	}
+
+	for {
+		select {
+		case event, ok := <-listener:
+			if !ok {
+				return engine.StatusOK
+			}
+			if err := writeEvent(job, event); err != nil {
+				return job.Error(err)
+			}
+		case <-timeout.C:
+			return engine.StatusOK
+		}
+	}
+}
+
+func (e *Events) Log(job *engine.Job) engine.Status {
+	if len(job.Args) != 3 {
+		return job.Errorf("usage: %s ACTION ID FROM", job.Name)
+	}
+	// not waiting for receivers
+	go e.log(job.Args[0], job.Args[1], job.Args[2])
+	return engine.StatusOK
+}
+
+func (e *Events) SubscribersCount(job *engine.Job) engine.Status {
+	ret := &engine.Env{}
+	ret.SetInt("count", e.subscribersCount())
+	ret.WriteTo(job.Stdout)
+	return engine.StatusOK
+}
+
+func writeEvent(job *engine.Job, event *utils.JSONMessage) error {
+	// When sending an event, JSON serialization errors are ignored, but all
+	// other errors lead to the eviction of the listener.
+	if b, err := json.Marshal(event); err == nil {
+		if _, err = job.Stdout.Write(b); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *Events) writeCurrent(job *engine.Job, since, until int64) error {
+	e.mu.RLock()
+	for _, event := range e.events {
+		if event.Time >= since && (event.Time <= until || until == 0) {
+			if err := writeEvent(job, event); err != nil {
+				e.mu.RUnlock()
+				return err
+			}
+		}
+	}
+	e.mu.RUnlock()
+	return nil
+}
+
+func (e *Events) subscribersCount() int {
+	e.mu.RLock()
+	c := len(e.subscribers)
+	e.mu.RUnlock()
+	return c
+}
+
+func (e *Events) log(action, id, from string) {
+	e.mu.Lock()
+	now := time.Now().UTC().Unix()
+	jm := &utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
+	if len(e.events) == cap(e.events) {
+		// discard oldest event
+		copy(e.events, e.events[1:])
+		e.events[len(e.events)-1] = jm
+	} else {
+		e.events = append(e.events, jm)
+	}
+	for _, s := range e.subscribers {
+		// We give each subscriber a 100ms time window to receive the event,
+		// after which we move to the next.
+		select {
+		case s <- jm:
+		case <-time.After(100 * time.Millisecond):
+		}
+	}
+	e.mu.Unlock()
+}
+
+func (e *Events) subscribe(l listener) {
+	e.mu.Lock()
+	e.subscribers = append(e.subscribers, l)
+	e.mu.Unlock()
+}
+
+// unsubscribe closes and removes the specified listener from the list of
+// previously registered ones.
+// It returns a boolean value indicating if the listener was successfully
+// found, closed and unregistered.
+func (e *Events) unsubscribe(l listener) bool {
+	e.mu.Lock()
+	for i, subscriber := range e.subscribers {
+		if subscriber == l {
+			close(l)
+			e.subscribers = append(e.subscribers[:i], e.subscribers[i+1:]...)
+			e.mu.Unlock()
+			return true
+		}
+	}
+	e.mu.Unlock()
+	return false
+}
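The `log` method above keeps the event history bounded at `eventsLimit` (64): once the backing slice is full, it shifts everything left and overwrites the last slot. A standalone sketch of just that eviction policy, using `int` in place of `*utils.JSONMessage`:

```go
// appendBounded mirrors the branch in Events.log: the slice never grows
// past its initial capacity; instead the oldest element is discarded.
func appendBounded(events []int, next int) []int {
	if len(events) == cap(events) {
		copy(events, events[1:]) // shift left, dropping the oldest
		events[len(events)-1] = next
		return events
	}
	return append(events, next)
}
```

Seeded with `events := make([]int, 0, 64)`, this matches the `make([]*utils.JSONMessage, 0, eventsLimit)` initialization in New.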
diff --git a/events/events_test.go b/events/events_test.go
new file mode 100644
index 0000000..d4fc664
--- /dev/null
+++ b/events/events_test.go
@@ -0,0 +1,154 @@
+package events
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/utils"
+)
+
+func TestEventsPublish(t *testing.T) {
+	e := New()
+	l1 := make(chan *utils.JSONMessage)
+	l2 := make(chan *utils.JSONMessage)
+	e.subscribe(l1)
+	e.subscribe(l2)
+	count := e.subscribersCount()
+	if count != 2 {
+		t.Fatalf("Must be 2 subscribers, got %d", count)
+	}
+	go e.log("test", "cont", "image")
+	select {
+	case msg := <-l1:
+		if len(e.events) != 1 {
+			t.Fatalf("Must be only one event, got %d", len(e.events))
+		}
+		if msg.Status != "test" {
+			t.Fatalf("Status should be test, got %s", msg.Status)
+		}
+		if msg.ID != "cont" {
+			t.Fatalf("ID should be cont, got %s", msg.ID)
+		}
+		if msg.From != "image" {
+			t.Fatalf("From should be image, got %s", msg.From)
+		}
+	case <-time.After(1 * time.Second):
+		t.Fatal("Timeout waiting for broadcasted message")
+	}
+	select {
+	case msg := <-l2:
+		if len(e.events) != 1 {
+			t.Fatalf("Must be only one event, got %d", len(e.events))
+		}
+		if msg.Status != "test" {
+			t.Fatalf("Status should be test, got %s", msg.Status)
+		}
+		if msg.ID != "cont" {
+			t.Fatalf("ID should be cont, got %s", msg.ID)
+		}
+		if msg.From != "image" {
+			t.Fatalf("From should be image, got %s", msg.From)
+		}
+	case <-time.After(1 * time.Second):
+		t.Fatal("Timeout waiting for broadcasted message")
+	}
+}
+
+func TestEventsPublishTimeout(t *testing.T) {
+	e := New()
+	l := make(chan *utils.JSONMessage)
+	e.subscribe(l)
+
+	c := make(chan struct{})
+	go func() {
+		e.log("test", "cont", "image")
+		close(c)
+	}()
+
+	select {
+	case <-c:
+	case <-time.After(time.Second):
+		t.Fatal("Timeout publishing message")
+	}
+}
+
+func TestLogEvents(t *testing.T) {
+	e := New()
+	eng := engine.New()
+	if err := e.Install(eng); err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < eventsLimit+16; i++ {
+		action := fmt.Sprintf("action_%d", i)
+		id := fmt.Sprintf("cont_%d", i)
+		from := fmt.Sprintf("image_%d", i)
+		job := eng.Job("log", action, id, from)
+		if err := job.Run(); err != nil {
+			t.Fatal(err)
+		}
+	}
+	time.Sleep(50 * time.Millisecond)
+	if len(e.events) != eventsLimit {
+		t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
+	}
+
+	job := eng.Job("events")
+	job.SetenvInt64("since", 1)
+	job.SetenvInt64("until", time.Now().Unix())
+	buf := bytes.NewBuffer(nil)
+	job.Stdout.Add(buf)
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	buf = bytes.NewBuffer(buf.Bytes())
+	dec := json.NewDecoder(buf)
+	var msgs []utils.JSONMessage
+	for {
+		var jm utils.JSONMessage
+		if err := dec.Decode(&jm); err != nil {
+			if err == io.EOF {
+				break
+			}
+			t.Fatal(err)
+		}
+		msgs = append(msgs, jm)
+	}
+	if len(msgs) != eventsLimit {
+		t.Fatalf("Must be %d events, got %d", eventsLimit, len(msgs))
+	}
+	first := msgs[0]
+	if first.Status != "action_16" {
+		t.Fatalf("First action is %s, must be action_15", first.Status)
+	}
+	last := msgs[len(msgs)-1]
+	if last.Status != "action_79" {
+		t.Fatalf("First action is %s, must be action_79", first.Status)
+	}
+}
+
+func TestEventsCountJob(t *testing.T) {
+	e := New()
+	eng := engine.New()
+	if err := e.Install(eng); err != nil {
+		t.Fatal(err)
+	}
+	l1 := make(chan *utils.JSONMessage)
+	l2 := make(chan *utils.JSONMessage)
+	e.subscribe(l1)
+	e.subscribe(l2)
+	job := eng.Job("subscribers_count")
+	env, _ := job.Stdout.AddEnv()
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	count := env.GetInt("count")
+	if count != 2 {
+		t.Fatalf("There must be 2 subscribers, got %d", count)
+	}
+}
diff --git a/graph/MAINTAINERS b/graph/MAINTAINERS
new file mode 100644
index 0000000..e409454
--- /dev/null
+++ b/graph/MAINTAINERS
@@ -0,0 +1,5 @@
+Solomon Hykes <solomon@docker.com> (@shykes)
+Victor Vieux <vieux@docker.com> (@vieux)
+Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
diff --git a/graph/export.go b/graph/export.go
new file mode 100644
index 0000000..d24d971
--- /dev/null
+++ b/graph/export.go
@@ -0,0 +1,147 @@
+package graph
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/parsers"
+)
+
+// CmdImageExport exports all images with the given tag. All versions
+// containing the same tag are exported. The resulting output is an
+// uncompressed tarball.
+// name is the set of tags to export.
+// out is the writer the images are written to.
+func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
+	if len(job.Args) != 1 {
+		return job.Errorf("Usage: %s IMAGE\n", job.Name)
+	}
+	name := job.Args[0]
+	// get image json
+	tempdir, err := ioutil.TempDir("", "docker-export-")
+	if err != nil {
+		return job.Error(err)
+	}
+	defer os.RemoveAll(tempdir)
+
+	log.Debugf("Serializing %s", name)
+
+	rootRepoMap := map[string]Repository{}
+	rootRepo, err := s.Get(name)
+	if err != nil {
+		return job.Error(err)
+	}
+	if rootRepo != nil {
+		// this is a base repo name, like 'busybox'
+
+		for _, id := range rootRepo {
+			if err := s.exportImage(job.Eng, id, tempdir); err != nil {
+				return job.Error(err)
+			}
+		}
+		rootRepoMap[name] = rootRepo
+	} else {
+		img, err := s.LookupImage(name)
+		if err != nil {
+			return job.Error(err)
+		}
+		if img != nil {
+			// This is a named image like 'busybox:latest'
+			repoName, repoTag := parsers.ParseRepositoryTag(name)
+			if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
+				return job.Error(err)
+			}
+			// check this length, because a lookup of a truncated hash will not have a tag
+			// and will not need to be added to this map
+			if len(repoTag) > 0 {
+				rootRepoMap[repoName] = Repository{repoTag: img.ID}
+			}
+		} else {
+			// this must be an image ID that could not be resolved to a repository or named image
+			if err := s.exportImage(job.Eng, name, tempdir); err != nil {
+				return job.Error(err)
+			}
+		}
+	}
+	// write repositories, if there is something to write
+	if len(rootRepoMap) > 0 {
+		rootRepoJson, _ := json.Marshal(rootRepoMap)
+
+		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
+			return job.Error(err)
+		}
+	} else {
+		log.Debugf("There were no repositories to write")
+	}
+
+	fs, err := archive.Tar(tempdir, archive.Uncompressed)
+	if err != nil {
+		return job.Error(err)
+	}
+	defer fs.Close()
+
+	if _, err := io.Copy(job.Stdout, fs); err != nil {
+		return job.Error(err)
+	}
+	log.Debugf("End Serializing %s", name)
+	return engine.StatusOK
+}
+
+// FIXME: this should be a top-level function, not a class method
+func (s *TagStore) exportImage(eng *engine.Engine, name, tempdir string) error {
+	for n := name; n != ""; {
+		// temporary directory
+		tmpImageDir := path.Join(tempdir, n)
+		if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
+			if os.IsExist(err) {
+				return nil
+			}
+			return err
+		}
+
+		var version = "1.0"
+		var versionBuf = []byte(version)
+
+		if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
+			return err
+		}
+
+		// serialize json
+		json, err := os.Create(path.Join(tmpImageDir, "json"))
+		if err != nil {
+			return err
+		}
+		job := eng.Job("image_inspect", n)
+		job.SetenvBool("raw", true)
+		job.Stdout.Add(json)
+		if err := job.Run(); err != nil {
+			return err
+		}
+
+		// serialize filesystem
+		fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
+		if err != nil {
+			return err
+		}
+		job = eng.Job("image_tarlayer", n)
+		job.Stdout.Add(fsTar)
+		if err := job.Run(); err != nil {
+			return err
+		}
+
+		// find parent
+		job = eng.Job("image_get", n)
+		info, _ := job.Stdout.AddEnv()
+		if err := job.Run(); err != nil {
+			return err
+		}
+		n = info.Get("Parent")
+	}
+	return nil
+}
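Pieced together from CmdImageExport and exportImage, the archive written to job.Stdout should look roughly like this for a tagged image (illustrative layout and placeholder names, not an excerpt from the diff):

```
repositories          # JSON map of repo -> {tag: image ID}; omitted when no repo/tag resolved
<image id>/VERSION    # always the literal "1.0"
<image id>/json       # output of the image_inspect job (raw mode)
<image id>/layer.tar  # output of the image_tarlayer job
<parent id>/...       # one directory per ancestor, following Parent up the chain
```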
diff --git a/graph/graph.go b/graph/graph.go
index 0badd8d..8f01739 100644
--- a/graph/graph.go
+++ b/graph/graph.go
@@ -12,13 +12,14 @@
 	"syscall"
 	"time"
 
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/pkg/truncindex"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/truncindex"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 // A Graph is a store for versioned filesystem images and the relationship between them.
@@ -64,7 +65,7 @@
 		}
 	}
 	graph.idIndex = truncindex.NewTruncIndex(ids)
-	utils.Debugf("Restored %d elements", len(dir))
+	log.Debugf("Restored %d elements", len(dir))
 	return nil
 }
 
diff --git a/graph/history.go b/graph/history.go
new file mode 100644
index 0000000..2030c4c
--- /dev/null
+++ b/graph/history.go
@@ -0,0 +1,46 @@
+package graph
+
+import (
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+)
+
+func (s *TagStore) CmdHistory(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 {
+		return job.Errorf("Usage: %s IMAGE", job.Name)
+	}
+	name := job.Args[0]
+	foundImage, err := s.LookupImage(name)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	lookupMap := make(map[string][]string)
+	for name, repository := range s.Repositories {
+		for tag, id := range repository {
+			// Record a reverse lookup from image ID to every repo:tag that references it
+			if _, exists := lookupMap[id]; !exists {
+				lookupMap[id] = []string{}
+			}
+			lookupMap[id] = append(lookupMap[id], name+":"+tag)
+		}
+	}
+
+	outs := engine.NewTable("Created", 0)
+	err = foundImage.WalkHistory(func(img *image.Image) error {
+		out := &engine.Env{}
+		out.Set("Id", img.ID)
+		out.SetInt64("Created", img.Created.Unix())
+		out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
+		out.SetList("Tags", lookupMap[img.ID])
+		out.SetInt64("Size", img.Size)
+		outs.Add(out)
+		return nil
+	})
+	if err != nil {
+		return job.Error(err)
+	}
+	if _, err := outs.WriteListTo(job.Stdout); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
diff --git a/graph/import.go b/graph/import.go
new file mode 100644
index 0000000..049742a
--- /dev/null
+++ b/graph/import.go
@@ -0,0 +1,61 @@
+package graph
+
+import (
+	"net/http"
+	"net/url"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/utils"
+)
+
+func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 2 && n != 3 {
+		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
+	}
+	var (
+		src     = job.Args[0]
+		repo    = job.Args[1]
+		tag     string
+		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
+		archive archive.ArchiveReader
+		resp    *http.Response
+	)
+	if len(job.Args) > 2 {
+		tag = job.Args[2]
+	}
+
+	if src == "-" {
+		archive = job.Stdin
+	} else {
+		u, err := url.Parse(src)
+		if err != nil {
+			return job.Error(err)
+		}
+		if u.Scheme == "" {
+			u.Scheme = "http"
+			u.Host = src
+			u.Path = ""
+		}
+		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
+		resp, err = utils.Download(u.String())
+		if err != nil {
+			return job.Error(err)
+		}
+		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
+		defer progressReader.Close()
+		archive = progressReader
+	}
+	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil)
+	if err != nil {
+		return job.Error(err)
+	}
+	// Optionally register the image at REPO/TAG
+	if repo != "" {
+		if err := s.Set(repo, tag, img.ID, true); err != nil {
+			return job.Error(err)
+		}
+	}
+	job.Stdout.Write(sf.FormatStatus("", img.ID))
+	return engine.StatusOK
+}
diff --git a/graph/list.go b/graph/list.go
new file mode 100644
index 0000000..0e0e97e
--- /dev/null
+++ b/graph/list.go
@@ -0,0 +1,103 @@
+package graph
+
+import (
+	"fmt"
+	"log"
+	"path"
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/parsers/filters"
+)
+
+func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
+	var (
+		allImages   map[string]*image.Image
+		err         error
+		filt_tagged = true
+	)
+
+	imageFilters, err := filters.FromParam(job.Getenv("filters"))
+	if err != nil {
+		return job.Error(err)
+	}
+	if i, ok := imageFilters["dangling"]; ok {
+		for _, value := range i {
+			if strings.ToLower(value) == "true" {
+				filt_tagged = false
+			}
+		}
+	}
+
+	if job.GetenvBool("all") && filt_tagged {
+		allImages, err = s.graph.Map()
+	} else {
+		allImages, err = s.graph.Heads()
+	}
+	if err != nil {
+		return job.Error(err)
+	}
+	lookup := make(map[string]*engine.Env)
+	s.Lock()
+	for name, repository := range s.Repositories {
+		if job.Getenv("filter") != "" {
+			if match, _ := path.Match(job.Getenv("filter"), name); !match {
+				continue
+			}
+		}
+		for tag, id := range repository {
+			image, err := s.graph.Get(id)
+			if err != nil {
+				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
+				continue
+			}
+
+			if out, exists := lookup[id]; exists {
+				if filt_tagged {
+					out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
+				}
+			} else {
+				// this image is tagged, so drop it from the untagged set; only build an entry when tagged images are requested
+				delete(allImages, id)
+				if filt_tagged {
+					out := &engine.Env{}
+					out.Set("ParentId", image.Parent)
+					out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
+					out.Set("Id", image.ID)
+					out.SetInt64("Created", image.Created.Unix())
+					out.SetInt64("Size", image.Size)
+					out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
+					lookup[id] = out
+				}
+			}
+
+		}
+	}
+	s.Unlock()
+
+	outs := engine.NewTable("Created", len(lookup))
+	for _, value := range lookup {
+		outs.Add(value)
+	}
+
+	// Display images which aren't part of a repository/tag
+	if job.Getenv("filter") == "" {
+		for _, image := range allImages {
+			out := &engine.Env{}
+			out.Set("ParentId", image.Parent)
+			out.SetList("RepoTags", []string{"<none>:<none>"})
+			out.Set("Id", image.ID)
+			out.SetInt64("Created", image.Created.Unix())
+			out.SetInt64("Size", image.Size)
+			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
+			outs.Add(out)
+		}
+	}
+
+	outs.ReverseSort()
+	if _, err := outs.WriteListTo(job.Stdout); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
diff --git a/graph/load.go b/graph/load.go
new file mode 100644
index 0000000..ec87ef5
--- /dev/null
+++ b/graph/load.go
@@ -0,0 +1,128 @@
+package graph
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/log"
+)
+
+// CmdLoad loads a set of images into the repository. It is the complement of CmdImageExport.
+// The input stream is an uncompressed tarball containing images and metadata.
+func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
+	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
+	if err != nil {
+		return job.Error(err)
+	}
+	defer os.RemoveAll(tmpImageDir)
+
+	var (
+		repoTarFile = path.Join(tmpImageDir, "repo.tar")
+		repoDir     = path.Join(tmpImageDir, "repo")
+	)
+
+	tarFile, err := os.Create(repoTarFile)
+	if err != nil {
+		return job.Error(err)
+	}
+	if _, err := io.Copy(tarFile, job.Stdin); err != nil {
+		return job.Error(err)
+	}
+	tarFile.Close()
+
+	repoFile, err := os.Open(repoTarFile)
+	if err != nil {
+		return job.Error(err)
+	}
+	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
+		return job.Error(err)
+	}
+	images, err := s.graph.Map()
+	if err != nil {
+		return job.Error(err)
+	}
+	excludes := make([]string, len(images))
+	i := 0
+	for k := range images {
+		excludes[i] = k
+		i++
+	}
+	if err := archive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil {
+		return job.Error(err)
+	}
+
+	dirs, err := ioutil.ReadDir(repoDir)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	for _, d := range dirs {
+		if d.IsDir() {
+			if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
+				return job.Error(err)
+			}
+		}
+	}
+
+	repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
+	if err == nil {
+		repositories := map[string]Repository{}
+		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
+			return job.Error(err)
+		}
+
+		for imageName, tagMap := range repositories {
+			for tag, address := range tagMap {
+				if err := s.Set(imageName, tag, address, true); err != nil {
+					return job.Error(err)
+				}
+			}
+		}
+	} else if !os.IsNotExist(err) {
+		return job.Error(err)
+	}
+
+	return engine.StatusOK
+}
+
+func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
+	if err := eng.Job("image_get", address).Run(); err != nil {
+		log.Debugf("Loading %s", address)
+
+		imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
+		if err != nil {
+			log.Debugf("Error reading json", err)
+			return err
+		}
+
+		layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
+		if err != nil {
+			log.Debugf("Error reading embedded tar", err)
+			return err
+		}
+		img, err := image.NewImgJSON(imageJson)
+		if err != nil {
+			log.Debugf("Error unmarshalling json", err)
+			return err
+		}
+		if img.Parent != "" {
+			if !s.graph.Exists(img.Parent) {
+				if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
+					return err
+				}
+			}
+		}
+		if err := s.graph.Register(imageJson, layer, img); err != nil {
+			return err
+		}
+	}
+	log.Debugf("Completed processing %s", address)
+
+	return nil
+}
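Note the ordering guarantee in recursiveLoad: each call descends into img.Parent before calling graph.Register, so ancestors are always registered before their children. A hypothetical mirror of that shape (imageNode and registerChain are illustrative names, not part of this diff):

```go
type imageNode struct {
	id     string
	parent *imageNode
}

// registerChain registers ancestors first: for base <- mid <- top,
// the order is base, mid, top (the same order recursiveLoad produces).
func registerChain(registered map[string]bool, n *imageNode) {
	if n == nil || registered[n.id] {
		return
	}
	registerChain(registered, n.parent) // parent before child
	registered[n.id] = true             // stands in for graph.Register
}
```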
diff --git a/graph/pools_test.go b/graph/pools_test.go
new file mode 100644
index 0000000..785a4bd
--- /dev/null
+++ b/graph/pools_test.go
@@ -0,0 +1,41 @@
+package graph
+
+import "testing"
+
+func TestPools(t *testing.T) {
+	s := &TagStore{
+		pullingPool: make(map[string]chan struct{}),
+		pushingPool: make(map[string]chan struct{}),
+	}
+
+	if _, err := s.poolAdd("pull", "test1"); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := s.poolAdd("pull", "test2"); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := s.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
+		t.Fatalf("Expected `pull test1 is already in progress`")
+	}
+	if _, err := s.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
+		t.Fatalf("Expected `pull test1 is already in progress`")
+	}
+	if _, err := s.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
+		t.Fatalf("Expected `Unknown pool type`")
+	}
+	if err := s.poolRemove("pull", "test2"); err != nil {
+		t.Fatal(err)
+	}
+	if err := s.poolRemove("pull", "test2"); err != nil {
+		t.Fatal(err)
+	}
+	if err := s.poolRemove("pull", "test1"); err != nil {
+		t.Fatal(err)
+	}
+	if err := s.poolRemove("push", "test1"); err != nil {
+		t.Fatal(err)
+	}
+	if err := s.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
+		t.Fatalf("Expected `Unknown pool type`")
+	}
+}
diff --git a/graph/pull.go b/graph/pull.go
new file mode 100644
index 0000000..180612f
--- /dev/null
+++ b/graph/pull.go
@@ -0,0 +1,301 @@
+package graph
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 && n != 2 {
+		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
+	}
+	var (
+		localName   = job.Args[0]
+		tag         string
+		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
+		authConfig  = &registry.AuthConfig{}
+		metaHeaders map[string][]string
+	)
+	if len(job.Args) > 1 {
+		tag = job.Args[1]
+	}
+
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("metaHeaders", &metaHeaders)
+
+	c, err := s.poolAdd("pull", localName+":"+tag)
+	if err != nil {
+		if c != nil {
+			// Another pull of the same repository is already taking place; just wait for it to finish
+			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
+			<-c
+			return engine.StatusOK
+		}
+		return job.Error(err)
+	}
+	defer s.poolRemove("pull", localName+":"+tag)
+
+	// Resolve the Repository name from fqn to endpoint + name
+	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	if endpoint == registry.IndexServerAddress() {
+		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
+		localName = remoteName
+	}
+
+	if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
+		return job.Error(err)
+	}
+
+	return engine.StatusOK
+}
+
+func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
+	out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
+
+	repoData, err := r.GetRepositoryData(remoteName)
+	if err != nil {
+		if strings.Contains(err.Error(), "HTTP code: 404") {
+			return fmt.Errorf("Error: image %s not found", remoteName)
+		} else {
+			// Unexpected HTTP error
+			return err
+		}
+	}
+
+	log.Debugf("Retrieving the tag list")
+	tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
+	if err != nil {
+		log.Errorf("%v", err)
+		return err
+	}
+
+	for tag, id := range tagsList {
+		repoData.ImgList[id] = &registry.ImgData{
+			ID:       id,
+			Tag:      tag,
+			Checksum: "",
+		}
+	}
+
+	log.Debugf("Registering tags")
+	// If no tag has been specified, pull them all
+	if askedTag == "" {
+		for tag, id := range tagsList {
+			repoData.ImgList[id].Tag = tag
+		}
+	} else {
+		// Otherwise, check that the tag exists and use only that one
+		id, exists := tagsList[askedTag]
+		if !exists {
+			return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
+		}
+		repoData.ImgList[id].Tag = askedTag
+	}
+
+	errors := make(chan error)
+	for _, image := range repoData.ImgList {
+		downloadImage := func(img *registry.ImgData) {
+			if askedTag != "" && img.Tag != askedTag {
+				log.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
+				if parallel {
+					errors <- nil
+				}
+				return
+			}
+
+			if img.Tag == "" {
+				log.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
+				if parallel {
+					errors <- nil
+				}
+				return
+			}
+
+			// ensure no two downloads of the same image happen at the same time
+			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
+				if c != nil {
+					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
+					<-c
+					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
+				} else {
+					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
+				}
+				if parallel {
+					errors <- nil
+				}
+				return
+			}
+			defer s.poolRemove("pull", "img:"+img.ID)
+
+			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
+			success := false
+			var lastErr error
+			for _, ep := range repoData.Endpoints {
+				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
+				if err := s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
+					// It's not ideal that only the last error is returned; it would be better to concatenate the errors.
+					// Since the error is also written to the output stream, the user will still see it.
+					lastErr = err
+					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
+					continue
+				}
+				success = true
+				break
+			}
+			if !success {
+				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, localName, lastErr)
+				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil))
+				if parallel {
+					errors <- err
+					return
+				}
+			}
+			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
+
+			if parallel {
+				errors <- nil
+			}
+		}
+
+		if parallel {
+			go downloadImage(image)
+		} else {
+			downloadImage(image)
+		}
+	}
+	if parallel {
+		var lastError error
+		for i := 0; i < len(repoData.ImgList); i++ {
+			if err := <-errors; err != nil {
+				lastError = err
+			}
+		}
+		if lastError != nil {
+			return lastError
+		}
+
+	}
+	for tag, id := range tagsList {
+		if askedTag != "" && tag != askedTag {
+			continue
+		}
+		if err := s.Set(localName, tag, id, true); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
+	history, err := r.GetRemoteHistory(imgID, endpoint, token)
+	if err != nil {
+		return err
+	}
+	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
+	// FIXME: Try to stream the images?
+	// FIXME: Launch the getRemoteImage() in goroutines
+
+	for i := len(history) - 1; i >= 0; i-- {
+		id := history[i]
+
+		// ensure no two downloads of the same layer happen at the same time
+		if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
+			log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
+			<-c
+		}
+		defer s.poolRemove("pull", "layer:"+id)
+
+		if !s.graph.Exists(id) {
+			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
+			var (
+				imgJSON []byte
+				imgSize int
+				err     error
+				img     *image.Image
+			)
+			retries := 5
+			for j := 1; j <= retries; j++ {
+				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
+				if err != nil && j == retries {
+					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
+					return err
+				} else if err != nil {
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+					continue
+				}
+				img, err = image.NewImgJSON(imgJSON)
+				if err != nil && j == retries {
+					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
+					return fmt.Errorf("Failed to parse json: %s", err)
+				} else if err != nil {
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+					continue
+				} else {
+					break
+				}
+			}
+
+			for j := 1; j <= retries; j++ {
+				// Get the layer
+				status := "Pulling fs layer"
+				if j > 1 {
+					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
+				}
+				out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
+				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
+				if uerr, ok := err.(*url.Error); ok {
+					err = uerr.Err
+				}
+				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+					continue
+				} else if err != nil {
+					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
+					return err
+				}
+				defer layer.Close()
+
+				err = s.graph.Register(imgJSON,
+					utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"),
+					img)
+				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
+					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+					continue
+				} else if err != nil {
+					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
+					return err
+				} else {
+					break
+				}
+			}
+		}
+		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
+
+	}
+	return nil
+}
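The two retry loops in pullImage share one backoff policy: up to 5 attempts, sleeping attempt*500ms between failures (the layer loop additionally only retries on net timeouts). A sketch of that policy as a helper; withRetry is a hypothetical name, not part of this diff:

```go
// withRetry runs op up to retries times, sleeping j*500ms after the
// j-th failure, and returns the last error if all attempts fail.
func withRetry(retries int, op func() error) error {
	var err error
	for j := 1; j <= retries; j++ {
		if err = op(); err == nil {
			return nil
		}
		if j < retries {
			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
		}
	}
	return err
}
```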
diff --git a/graph/push.go b/graph/push.go
new file mode 100644
index 0000000..db5b73a
--- /dev/null
+++ b/graph/push.go
@@ -0,0 +1,250 @@
+package graph
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+// getImageList retrieves all the images to be uploaded, in the correct order
+func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
+	var (
+		imageList   []string
+		imagesSeen  map[string]bool     = make(map[string]bool)
+		tagsByImage map[string][]string = make(map[string][]string)
+	)
+
+	for tag, id := range localRepo {
+		if requestedTag != "" && requestedTag != tag {
+			continue
+		}
+		var imageListForThisTag []string
+
+		tagsByImage[id] = append(tagsByImage[id], tag)
+
+		for img, err := s.graph.Get(id); img != nil; img, err = img.GetParent() {
+			if err != nil {
+				return nil, nil, err
+			}
+
+			if imagesSeen[img.ID] {
+				// This image is already on the list, we can ignore it and all its parents
+				break
+			}
+
+			imagesSeen[img.ID] = true
+			imageListForThisTag = append(imageListForThisTag, img.ID)
+		}
+
+		// reverse the image list for this tag (so the "most"-parent image is first)
+		for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
+			imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
+		}
+
+		// append to main image list
+		imageList = append(imageList, imageListForThisTag...)
+	}
+	if len(imageList) == 0 {
+		return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
+	}
+	log.Debugf("Image list: %v", imageList)
+	log.Debugf("Tags by image: %v", tagsByImage)
+
+	return imageList, tagsByImage, nil
+}
+
+func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
+	out = utils.NewWriteFlusher(out)
+	log.Debugf("Local repo: %s", localRepo)
+	imgList, tagsByImage, err := s.getImageList(localRepo, tag)
+	if err != nil {
+		return err
+	}
+
+	out.Write(sf.FormatStatus("", "Sending image list"))
+
+	var (
+		repoData   *registry.RepositoryData
+		imageIndex []*registry.ImgData
+	)
+
+	for _, imgId := range imgList {
+		if tags, exists := tagsByImage[imgId]; exists {
+			// If an image has tags, you must add an entry in the image index
+			// for each tag
+			for _, tag := range tags {
+				imageIndex = append(imageIndex, &registry.ImgData{
+					ID:  imgId,
+					Tag: tag,
+				})
+			}
+		} else {
+			// If the image does not have a tag it still needs to be sent to the
+			// registry with an empty tag so that it is associated with the repository
+			imageIndex = append(imageIndex, &registry.ImgData{
+				ID:  imgId,
+				Tag: "",
+			})
+
+		}
+	}
+
+	log.Debugf("Preparing to push %s with the following images and tags", localRepo)
+	for _, data := range imageIndex {
+		log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
+	}
+
+	// Register all the images in a repository with the registry
+	// If an image is not in this list it will not be associated with the repository
+	repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
+	if err != nil {
+		return err
+	}
+
+	nTag := 1
+	if tag == "" {
+		nTag = len(localRepo)
+	}
+	for _, ep := range repoData.Endpoints {
+		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))
+
+		for _, imgId := range imgList {
+			if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
+				out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
+			} else {
+				if _, err := s.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
+					// FIXME: Continue on error?
+					return err
+				}
+			}
+
+			for _, tag := range tagsByImage[imgId] {
+				out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))
+
+				if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *TagStore) pushImage(r *registry.Session, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
+	out = utils.NewWriteFlusher(out)
+	jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json"))
+	if err != nil {
+		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
+	}
+	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))
+
+	imgData := &registry.ImgData{
+		ID: imgID,
+	}
+
+	// Send the json
+	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
+		if err == registry.ErrAlreadyExists {
+			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
+			return "", nil
+		}
+		return "", err
+	}
+
+	layerData, err := s.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out)
+	if err != nil {
+		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
+	}
+	defer os.RemoveAll(layerData.Name())
+
+	// Send the layer
+	log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
+
+	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
+	if err != nil {
+		return "", err
+	}
+	imgData.Checksum = checksum
+	imgData.ChecksumPayload = checksumPayload
+	// Send the checksum
+	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
+		return "", err
+	}
+
+	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
+	return imgData.Checksum, nil
+}
+
+// FIXME: Allow interrupting the current push when a new push of the same image is started.
+func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 {
+		return job.Errorf("Usage: %s IMAGE", job.Name)
+	}
+	var (
+		localName   = job.Args[0]
+		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
+		authConfig  = &registry.AuthConfig{}
+		metaHeaders map[string][]string
+	)
+
+	tag := job.Getenv("tag")
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("metaHeaders", &metaHeaders)
+	if _, err := s.poolAdd("push", localName); err != nil {
+		return job.Error(err)
+	}
+	defer s.poolRemove("push", localName)
+
+	// Resolve the Repository name from fqn to endpoint + name
+	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	img, err := s.graph.Get(localName)
+	r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
+	if err2 != nil {
+		return job.Error(err2)
+	}
+
+	if err != nil {
+		reposLen := 1
+		if tag == "" {
+			reposLen = len(s.Repositories[localName])
+		}
+		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
+		// If it fails, try to get the repository
+		if localRepo, exists := s.Repositories[localName]; exists {
+			if err := s.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
+				return job.Error(err)
+			}
+			return engine.StatusOK
+		}
+		return job.Error(err)
+	}
+
+	var token []string
+	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
+	if _, err := s.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
diff --git a/graph/service.go b/graph/service.go
index 3201d6b..b7db35d 100644
--- a/graph/service.go
+++ b/graph/service.go
@@ -1,19 +1,35 @@
 package graph
 
 import (
+	"fmt"
 	"io"
 
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/log"
 )
 
 func (s *TagStore) Install(eng *engine.Engine) error {
-	eng.Register("image_set", s.CmdSet)
-	eng.Register("image_tag", s.CmdTag)
-	eng.Register("image_get", s.CmdGet)
-	eng.Register("image_inspect", s.CmdLookup)
-	eng.Register("image_tarlayer", s.CmdTarLayer)
+	for name, handler := range map[string]engine.Handler{
+		"image_set":      s.CmdSet,
+		"image_tag":      s.CmdTag,
+		"tag":            s.CmdTagLegacy, // FIXME merge with "image_tag"
+		"image_get":      s.CmdGet,
+		"image_inspect":  s.CmdLookup,
+		"image_tarlayer": s.CmdTarLayer,
+		"image_export":   s.CmdImageExport,
+		"history":        s.CmdHistory,
+		"images":         s.CmdImages,
+		"viz":            s.CmdViz,
+		"load":           s.CmdLoad,
+		"import":         s.CmdImport,
+		"pull":           s.CmdPull,
+		"push":           s.CmdPush,
+	} {
+		if err := eng.Register(name, handler); err != nil {
+			return fmt.Errorf("Could not register %q: %v", name, err)
+		}
+	}
 	return nil
 }
 
@@ -64,29 +80,6 @@
 	return engine.StatusOK
 }
 
-// CmdTag assigns a new name and tag to an existing image. If the tag already exists,
-// it is changed and the image previously referenced by the tag loses that reference.
-// This may cause the old image to be garbage-collected if its reference count reaches zero.
-//
-// Syntax: image_tag NEWNAME OLDNAME
-// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0
-func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
-	if len(job.Args) != 2 {
-		return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name)
-	}
-	var (
-		newName = job.Args[0]
-		oldName = job.Args[1]
-	)
-	newRepo, newTag := utils.ParseRepositoryTag(newName)
-	// FIXME: Set should either parse both old and new name, or neither.
-	// 	the current prototype is inconsistent.
-	if err := s.Set(newRepo, newTag, oldName, true); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
 // CmdGet returns information about an image.
 // If the image doesn't exist, an empty object is returned, to allow
 // checking for an image's existence.
@@ -180,7 +173,7 @@
 		if written, err := io.Copy(job.Stdout, fs); err != nil {
 			return job.Error(err)
 		} else {
-			utils.Debugf("rendered layer for %s of [%d] size", image.ID, written)
+			log.Debugf("rendered layer for %s of [%d] size", image.ID, written)
 		}
 
 		return engine.StatusOK
diff --git a/graph/tag.go b/graph/tag.go
new file mode 100644
index 0000000..3d89422
--- /dev/null
+++ b/graph/tag.go
@@ -0,0 +1,44 @@
+package graph
+
+import (
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/parsers"
+)
+
+// CmdTag assigns a new name and tag to an existing image. If the tag already exists,
+// it is changed and the image previously referenced by the tag loses that reference.
+// This may cause the old image to be garbage-collected if its reference count reaches zero.
+//
+// Syntax: image_tag NEWNAME OLDNAME
+// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0
+func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
+	if len(job.Args) != 2 {
+		return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name)
+	}
+	var (
+		newName = job.Args[0]
+		oldName = job.Args[1]
+	)
+	newRepo, newTag := parsers.ParseRepositoryTag(newName)
+	// FIXME: Set should either parse both old and new name, or neither.
+	// 	the current prototype is inconsistent.
+	if err := s.Set(newRepo, newTag, oldName, true); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
+
+// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job.
+func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status {
+	if len(job.Args) != 2 && len(job.Args) != 3 {
+		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
+	}
+	var tag string
+	if len(job.Args) == 3 {
+		tag = job.Args[2]
+	}
+	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
diff --git a/graph/tags.go b/graph/tags.go
index 7af6d38..30176ae 100644
--- a/graph/tags.go
+++ b/graph/tags.go
@@ -10,8 +10,9 @@
 	"strings"
 	"sync"
 
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/utils"
 )
 
 const DEFAULTTAG = "latest"
@@ -21,6 +22,10 @@
 	graph        *Graph
 	Repositories map[string]Repository
 	sync.Mutex
+	// FIXME: move push/pull-related fields
+	// to a helper type
+	pullingPool map[string]chan struct{}
+	pushingPool map[string]chan struct{}
 }
 
 type Repository map[string]string
@@ -34,6 +39,8 @@
 		path:         abspath,
 		graph:        graph,
 		Repositories: make(map[string]Repository),
+		pullingPool:  make(map[string]chan struct{}),
+		pushingPool:  make(map[string]chan struct{}),
 	}
 	// Load the json file if it exists, otherwise create it.
 	if err := store.reload(); os.IsNotExist(err) {
@@ -72,7 +79,7 @@
 func (store *TagStore) LookupImage(name string) (*image.Image, error) {
 	// FIXME: standardize on returning nil when the image doesn't exist, and err for everything else
 	// (so we can pass all errors here)
-	repos, tag := utils.ParseRepositoryTag(name)
+	repos, tag := parsers.ParseRepositoryTag(name)
 	if tag == "" {
 		tag = DEFAULTTAG
 	}
@@ -262,3 +269,46 @@
 	}
 	return nil
 }
+
+func (s *TagStore) poolAdd(kind, key string) (chan struct{}, error) {
+	s.Lock()
+	defer s.Unlock()
+
+	if c, exists := s.pullingPool[key]; exists {
+		return c, fmt.Errorf("pull %s is already in progress", key)
+	}
+	if c, exists := s.pushingPool[key]; exists {
+		return c, fmt.Errorf("push %s is already in progress", key)
+	}
+
+	c := make(chan struct{})
+	switch kind {
+	case "pull":
+		s.pullingPool[key] = c
+	case "push":
+		s.pushingPool[key] = c
+	default:
+		return nil, fmt.Errorf("Unknown pool type")
+	}
+	return c, nil
+}
+
+func (s *TagStore) poolRemove(kind, key string) error {
+	s.Lock()
+	defer s.Unlock()
+	switch kind {
+	case "pull":
+		if c, exists := s.pullingPool[key]; exists {
+			close(c)
+			delete(s.pullingPool, key)
+		}
+	case "push":
+		if c, exists := s.pushingPool[key]; exists {
+			close(c)
+			delete(s.pushingPool, key)
+		}
+	default:
+		return fmt.Errorf("Unknown pool type")
+	}
+	return nil
+}
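poolAdd's (channel, error) return is what lets callers either claim a key or wait on the concurrent operation that owns it, as CmdPull does above. A sketch of the caller-side pattern; pullOnce is a hypothetical wrapper, not part of this diff:

```go
// pullOnce runs do() at most once per key across concurrent callers;
// latecomers block until the owner's poolRemove closes the channel.
func (s *TagStore) pullOnce(key string, do func() error) error {
	c, err := s.poolAdd("pull", key)
	if err != nil {
		if c != nil {
			<-c // another pull of key is in flight; wait for it
			return nil
		}
		return err
	}
	defer s.poolRemove("pull", key) // closes c, waking any waiters
	return do()
}
```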
diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go
index 42e0977..c6edc2d 100644
--- a/graph/tags_unit_test.go
+++ b/graph/tags_unit_test.go
@@ -2,11 +2,11 @@
 
 import (
 	"bytes"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	_ "github.com/dotcloud/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/daemon/graphdriver"
+	_ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/utils"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 	"io"
 	"os"
 	"path"
@@ -19,11 +19,19 @@
 )
 
 func fakeTar() (io.Reader, error) {
+	uid := os.Getuid()
+	gid := os.Getgid()
+
 	content := []byte("Hello world!\n")
 	buf := new(bytes.Buffer)
 	tw := tar.NewWriter(buf)
 	for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
 		hdr := new(tar.Header)
+
+		// Leaving these fields blank requires root privileges
+		hdr.Uid = uid
+		hdr.Gid = gid
+
 		hdr.Size = int64(len(content))
 		hdr.Name = name
 		if err := tw.WriteHeader(hdr); err != nil {
@@ -53,8 +61,6 @@
 		t.Fatal(err)
 	}
 	img := &image.Image{ID: testImageID}
-	// FIXME: this fails on Darwin with:
-	// tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied
 	if err := graph.Register(nil, archive, img); err != nil {
 		t.Fatal(err)
 	}
diff --git a/graph/viz.go b/graph/viz.go
new file mode 100644
index 0000000..924c22b
--- /dev/null
+++ b/graph/viz.go
@@ -0,0 +1,38 @@
+package graph
+
+import (
+	"strings"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+)
+
+func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
+	images, _ := s.graph.Map()
+	if images == nil {
+		return engine.StatusOK
+	}
+	job.Stdout.Write([]byte("digraph docker {\n"))
+
+	var (
+		parentImage *image.Image
+		err         error
+	)
+	for _, image := range images {
+		parentImage, err = image.GetParent()
+		if err != nil {
+			return job.Errorf("Error while getting parent image: %v", err)
+		}
+		if parentImage != nil {
+			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
+		} else {
+			job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
+		}
+	}
+
+	for id, repos := range s.GetRepoRefs() {
+		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
+	}
+	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
+	return engine.StatusOK
+}
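For reference, the DOT emitted above for a one-parent chain tagged busybox:latest would look roughly like this (illustrative, truncated IDs; the real output uses full image IDs):

```
digraph docker {
 "511136ea3c5a" -> "df7546f9f060"
 base -> "511136ea3c5a" [style=invis]
 "df7546f9f060" [label="df7546f9f060\nbusybox:latest",shape=box,fillcolor="paleturquoise",style="filled,rounded"];
 base [style=invisible]
}
```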
diff --git a/hack/MAINTAINERS b/hack/MAINTAINERS
index 299d9a1..15e4433 100644
--- a/hack/MAINTAINERS
+++ b/hack/MAINTAINERS
@@ -1,2 +1,4 @@
 Tianon Gravi <admwiggin@gmail.com> (@tianon)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
 dind: Jerome Petazzoni <jerome@docker.com> (@jpetazzo)
diff --git a/hack/MAINTAINERS.md b/hack/MAINTAINERS.md
index 8af6013..0a4cd14 100644
--- a/hack/MAINTAINERS.md
+++ b/hack/MAINTAINERS.md
@@ -4,12 +4,12 @@
 
 Dear maintainer. Thank you for investing the time and energy to help
 make Docker as useful as possible. Maintaining a project is difficult,
-sometimes unrewarding work.  Sure, you will get to contribute cool
+sometimes unrewarding work. Sure, you will get to contribute cool
 features to the project. But most of your time will be spent reviewing,
-cleaning up, documenting, answering questions, justifying design
+cleaning up, documenting, answering questions, and justifying design
 decisions - while everyone has all the fun! But remember - the quality
-of the maintainers work is what distinguishes the good projects from the
-great.  So please be proud of your work, even the unglamourous parts,
+of the maintainers' work is what distinguishes the good projects from
+the great. So please be proud of your work, even the unglamorous parts,
 and encourage a culture of appreciation and respect for *every* aspect
 of improving the project - not just the hot new features.
 
@@ -20,34 +20,34 @@
 This is a living document - if you see something out of date or missing,
 speak up!
 
-## What are a maintainer's responsibility?
+## What is a maintainer's responsibility?
 
 It is every maintainer's responsibility to:
 
-1. Expose a clear roadmap for improving their component.
+1. Expose a clear road map for improving their component.
 2. Deliver prompt feedback and decisions on pull requests.
 3. Be available to anyone with questions, bug reports, criticism etc.
   on their component. This includes IRC, GitHub requests and the mailing
   list.
 4. Make sure their component respects the philosophy, design and
-  roadmap of the project.
+  road map of the project.
 
 ## How are decisions made?
 
-Short answer: with pull requests to the docker repository.
+Short answer: with pull requests to the Docker repository.
 
 Docker is an open-source project with an open design philosophy. This
 means that the repository is the source of truth for EVERY aspect of the
-project, including its philosophy, design, roadmap and APIs. *If it's
-part of the project, it's in the repo. It's in the repo, it's part of
+project, including its philosophy, design, road map, and APIs. *If it's
+part of the project, it's in the repo. If it's in the repo, it's part of
 the project.*
 
 As a result, all decisions can be expressed as changes to the
 repository. An implementation change is a change to the source code. An
 API change is a change to the API specification. A philosophy change is
-a change to the philosophy manifesto. And so on.
+a change to the philosophy manifesto, and so on.
 
-All decisions affecting docker, big and small, follow the same 3 steps:
+All decisions affecting Docker, big and small, follow the same 3 steps:
 
 * Step 1: Open a pull request. Anyone can do this.
 
@@ -55,21 +55,47 @@
 
 * Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do 
 this (see below "Who decides what?")
-
+ + Accepting pull requests
+  - If the pull request appears to be ready to merge, give it a `LGTM`, which
+    stands for "Looks Good To Me".
+  - If the pull request has some small problems that need to be changed, make
+    a comment addressing the issues.
+  - If the changes needed to a PR are small, you can add a "LGTM once the
+    following comments are addressed...". This will reduce needless back and
+    forth.
+  - If the PR only needs a few changes before being merged, any MAINTAINER can
+    make a replacement PR that incorporates the existing commits and fixes the
+    problems before a fast-track merge.
+ + Closing pull requests
+  - If a PR appears to be abandoned, and attempts to contact the original
+    contributor have failed, a replacement PR may be made. Once the
+    replacement PR is made, any contributor may close the original one.
+  - If you are not sure whether the pull request implements a good feature or
+    you do not understand the purpose of the PR, ask the contributor to
+    provide more documentation. If the contributor is not able to adequately
+    explain the purpose of the PR, the PR may be closed by any MAINTAINER.
+  - If a MAINTAINER feels that the pull request is sufficiently architecturally
+    flawed, or if the pull request needs significantly more design discussion
+    before being considered, the MAINTAINER should close the pull request with
+    a short explanation of what discussion still needs to be had. It is
+    important not to leave such pull requests open, as this will waste both the
+    MAINTAINER's time and the contributor's time. It is not good to string a
+    contributor along for weeks or months, having them make many changes to a
+    PR that will eventually be rejected.
 
 ## Who decides what?
 
 All decisions are pull requests, and the relevant maintainers make
-decisions by accepting or refusing the pull request. Review and acceptance
-by anyone is denoted by adding a comment in the pull request: `LGTM`. 
-However, only currently listed `MAINTAINERS` are counted towards the required
-majority.
+decisions by accepting or refusing pull requests. Review and acceptance
+by anyone is denoted by adding a comment in the pull request: `LGTM`.
+However, only currently listed `MAINTAINERS` are counted towards the
+required majority.
 
 Docker follows the timeless, highly efficient and totally unfair system
 known as [Benevolent dictator for
 life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with
 yours truly, Solomon Hykes, in the role of BDFL. This means that all
-decisions are made by default by Solomon. Since making every decision
+decisions are made, by default, by Solomon. Since making every decision
 myself would be highly un-scalable, in practice decisions are spread
 across multiple maintainers.
 
@@ -90,7 +116,7 @@
 Please let your co-maintainers and other contributors know by raising a pull
 request that comments out your `MAINTAINERS` file entry using a `#`.
 
-### I'm a maintainer, should I make pull requests too?
+### I'm a maintainer. Should I make pull requests too?
 
 Yes. Nobody should ever push to master directly. All changes should be
 made through a pull request.
diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md
index 82d959c..265f7d6 100644
--- a/hack/PACKAGERS.md
+++ b/hack/PACKAGERS.md
@@ -45,10 +45,10 @@
 To build Docker, you will need the following:
 
 * A recent version of git and mercurial
-* Go version 1.2 or later
+* Go version 1.3 or later
 * A clean checkout of the source added to a valid [Go
   workspace](http://golang.org/doc/code.html#Workspaces) under the path
-  *src/github.com/dotcloud/docker* (unless you plan to use `AUTO_GOPATH`,
+  *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
   explained in more detail below).
 
 To build the Docker daemon, you will additionally need:
@@ -145,18 +145,23 @@
 ```
 
 This will cause the build scripts to set up a reasonable `GOPATH` that
-automatically and properly includes both dotcloud/docker from the local
+automatically and properly includes both docker/docker from the local
 directory, and the local "./vendor" directory as necessary.
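+
+For example, a minimal client-only build using this mechanism might look like
+the following (a sketch; the `dynbinary` bundle and `DOCKER_CLIENTONLY` flag
+come from `hack/make.sh`):
+```bash
+export AUTO_GOPATH=1
+DOCKER_CLIENTONLY=1 ./hack/make.sh dynbinary
+```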
 
 ### `DOCKER_BUILDTAGS`
 
 If you're building a binary that may need to be used on platforms that include
 AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
-
 ```bash
 export DOCKER_BUILDTAGS='apparmor'
 ```
 
+If you're building a binary that may need to be used on platforms that include
+SELinux, you will need to use the `selinux` build tag:
+```bash
+export DOCKER_BUILDTAGS='selinux'
+```
+
 There are build tags for disabling graphdrivers as well. By default, support
 for all graphdrivers is built in.
 
@@ -175,13 +180,9 @@
 export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
 ```
 
-NOTE: if you need to set more than one build tag, space separate them.
-
-If you're building a binary that may need to be used on platforms that include
-SELinux, you will need to set `DOCKER_BUILDTAGS` as follows:
-
+NOTE: if you need to set more than one build tag, space-separate them:
 ```bash
-export DOCKER_BUILDTAGS='selinux'
+export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs'
 ```
 
 ### Static Daemon
@@ -270,9 +271,9 @@
 * a [properly
   mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
   cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
-  [is](https://github.com/dotcloud/docker/issues/2683)
-  [not](https://github.com/dotcloud/docker/issues/3485)
-  [sufficient](https://github.com/dotcloud/docker/issues/4568))
+  [is](https://github.com/docker/docker/issues/2683)
+  [not](https://github.com/docker/docker/issues/3485)
+  [sufficient](https://github.com/docker/docker/issues/4568))
 
 Additionally, the Docker client needs the following software to be installed and
 available at runtime:
@@ -300,7 +301,7 @@
 * LXC execution driver (requires version 1.0 or later of the LXC utility scripts)
 * AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
   least the "auplink" utility from aufs-tools)
-* experimental BTRFS graph driver (requires BTRFS support enabled in the kernel)
+* BTRFS graph driver (requires BTRFS support enabled in the kernel)
 
 ## Daemon Init Script
 
diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md
index 2fe1a3c..839f5c1 100644
--- a/hack/RELEASE-CHECKLIST.md
+++ b/hack/RELEASE-CHECKLIST.md
@@ -7,7 +7,7 @@
 to keep it up-to-date.
 
 It is important to note that this document assumes that the git remote in your
-repository that corresponds to "https://github.com/dotcloud/docker" is named
+repository that corresponds to "https://github.com/docker/docker" is named
 "origin".  If yours is not (for example, if you've chosen to name it "upstream"
 or something similar instead), be sure to adjust the listed snippets for your
 local environment accordingly.  If you are not sure what your upstream remote is
@@ -18,7 +18,7 @@
 
 ```bash
 export GITHUBUSER="YOUR_GITHUB_USER"
-git remote add origin https://github.com/dotcloud/docker.git
+git remote add origin https://github.com/docker/docker.git
 git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git
 ```
 
@@ -32,9 +32,23 @@
 git branch -D release || true
 git checkout --track origin/release
 git checkout -b bump_$VERSION
+```
+
+If it's a regular release, we usually merge master.
+```bash
 git merge origin/master
 ```
 
+Otherwise, if it is a hotfix release, we cherry-pick only the commits we want.
+```bash
+# get the commit IDs we want to cherry-pick
+git log
+# cherry-pick the commits starting from the oldest one, without including merge commits
+git cherry-pick <commit-id>
+git cherry-pick <commit-id>
+...
+```
+
 ### 2. Update CHANGELOG.md
 
 You can run this command for reference with git 2.0:
@@ -132,7 +146,7 @@
 (You will need the `awsconfig` file added to the `docs/` dir)
 
 ```bash
-make AWS_S3_BUCKET=beta-docs.docker.io docs-release
+make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
 ```
 
 ### 5. Commit and create a pull request to the "release" branch
@@ -141,7 +155,7 @@
 git add VERSION CHANGELOG.md
 git commit -m "Bump version to $VERSION"
 git push $GITHUBUSER bump_$VERSION
-echo "https://github.com/$GITHUBUSER/docker/compare/dotcloud:release...$GITHUBUSER:bump_$VERSION?expand=1"
+echo "https://github.com/$GITHUBUSER/docker/compare/docker:release...$GITHUBUSER:bump_$VERSION?expand=1"
 ```
 
 That last command will give you the proper link to visit to ensure that you
@@ -181,7 +195,7 @@
 help testing!  An easy way to get some useful links for sharing:
 
 ```bash
-echo "Ubuntu/Debian install script: curl -sLS https://test.docker.io/ | sh"
+echo "Ubuntu/Debian: https://test.docker.io/ubuntu or curl -sSL https://test.docker.io/ | sh"
 echo "Linux 64bit binary: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}"
 echo "Darwin/OSX 64bit client binary: https://test.docker.io/builds/Darwin/x86_64/docker-${VERSION#v}"
 echo "Darwin/OSX 32bit client binary: https://test.docker.io/builds/Darwin/i386/docker-${VERSION#v}"
@@ -235,6 +249,16 @@
 
 ### 11. Update the docs branch
 
+If this is a MAJOR.MINOR.0 release, you need to make a branch for the previous release's
+documentation:
+
+```bash
+git checkout -b docs-$PREVIOUS_MAJOR_MINOR docs
+git fetch
+git reset --hard origin/docs
+git push -f origin docs-$PREVIOUS_MAJOR_MINOR
+```
+
 You will need the `awsconfig` file added to the `docs/` directory to contain the
 s3 credentials for the bucket you are deploying to.
 
@@ -243,13 +267,15 @@
 git fetch
 git reset --hard origin/release
 git push -f origin docs
-make AWS_S3_BUCKET=docs.docker.io docs-release
+make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes docs-release
 ```
 
-The docs will appear on http://docs.docker.io/ (though there may be cached
-versions, so its worth checking http://docs.docker.io.s3-website-us-west-2.amazonaws.com/).
+The docs will appear on http://docs.docker.com/ (though there may be cached
+versions, so it's worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/).
 For more information about documentation releases, see `docs/README.md`.
 
+Ask Sven or JohnC to invalidate the CloudFront cache using the CDN Planet Chrome applet.
+
 ### 12. Create a new pull request to merge release back into master
 
 ```bash
@@ -262,7 +288,7 @@
 git add VERSION
 git commit -m "Change version to $(cat VERSION)"
 git push $GITHUBUSER merge_release_$VERSION
-echo "https://github.com/$GITHUBUSER/docker/compare/dotcloud:master...$GITHUBUSER:merge_release_$VERSION?expand=1"
+echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1"
 ```
 
 Again, get two maintainers to validate, then merge, then push that pretty
diff --git a/hack/ROADMAP.md b/hack/ROADMAP.md
index c38be56..d49664b 100644
--- a/hack/ROADMAP.md
+++ b/hack/ROADMAP.md
@@ -3,7 +3,7 @@
 This document is a high-level overview of where we want to take Docker next.
 It is a curated selection of planned improvements which are either important, difficult, or both.
 
-For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/dotcloud/docker/issues).
+For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/docker/issues).
 
 To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.
 
@@ -31,9 +31,9 @@
 
 ## Broader kernel support
 
-Our goal is to make Docker run everywhere, but currently Docker requires Linux version 3.8 or higher with lxc and aufs support. If you’re deploying new machines for the purpose of running Docker, this is a fairly easy requirement to meet. However, if you’re adding Docker to an existing deployment, you may not have the flexibility to update and patch the kernel.
+Our goal is to make Docker run everywhere, but currently Docker requires Linux version 3.8 or higher with cgroups support. If you’re deploying new machines for the purpose of running Docker, this is a fairly easy requirement to meet. However, if you’re adding Docker to an existing deployment, you may not have the flexibility to update and patch the kernel.
 
-Expanding Docker’s kernel support is a priority. This includes running on older kernel versions, but also on kernels with no AUFS support, or with incomplete lxc capabilities.
+Expanding Docker’s kernel support is a priority. This includes running on older kernel versions, specifically focusing on versions already popular in server deployments such as those used by RHEL and the OpenVZ stack.
 
 
 ## Cross-architecture support
diff --git a/hack/dind b/hack/dind
index 77629ad..f8fae63 100755
--- a/hack/dind
+++ b/hack/dind
@@ -2,7 +2,7 @@
 set -e
 
 # DinD: a wrapper script which allows docker to be run inside a docker container.
-# Original version by Jerome Petazzoni <jerome@dotcloud.com>
+# Original version by Jerome Petazzoni <jerome@docker.com>
 # See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/
 #
 # This script should be executed inside a docker container in privileged mode
diff --git a/hack/generate-authors.sh b/hack/generate-authors.sh
new file mode 100755
index 0000000..83f61df
--- /dev/null
+++ b/hack/generate-authors.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
+
+# see also ".mailmap" for how email addresses and names are deduplicated
+
+{
+	cat <<-'EOH'
+	# This file lists all individuals having contributed content to the repository.
+	# For how it is generated, see `hack/generate-authors.sh`.
+	EOH
+	echo
+	git log --format='%aN <%aE>' | sort -uf
+} > AUTHORS
diff --git a/hack/infrastructure/README.md b/hack/infrastructure/README.md
deleted file mode 100644
index d12fc4c..0000000
--- a/hack/infrastructure/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# Docker project infrastructure
-
-This is an overview of the Docker infrastructure.
-
-**Note: obviously, credentials should not be stored in this repository.**
-However, when there are credentials, we should list how to obtain them
-(e.g. who has them).
-
-
-## Providers
-
-This should be the list of all the entities providing some kind of
-infrastructure service to the Docker project (either for free,
-or paid by dotCloud).
-
-
-Provider      | Service
---------------|-------------------------------------------------
-AWS           | packages (S3 bucket), dotCloud PAAS, dev-env, ci
-CloudFlare    | cdn
-Digital Ocean | ci
-dotCloud PAAS | website, index, registry, ssl, blog
-DynECT        | dns (docker.com)            
-GitHub        | repository
-Linode        | stackbrew
-Mailgun       | outgoing e-mail            
-ReadTheDocs   | docs
-
-*Ordered-by: lexicographic*
-
-
-## URLs
-
-This should be the list of all the infrastructure-related URLs
-and which service is handling them.
-
-URL                                          | Service
----------------------------------------------|---------------------------------
- http://blog.docker.com/                     | blog
-*http://cdn-registry-1.docker.io/            | registry (pull)
- http://debug.docker.io/                     | debug tool
- http://docs.docker.com/                     | documentation served from an S3 bucket
- http://docker-ci.dotcloud.com/              | ci
- http://docker.com/                          | redirect to www.docker.com (dynect)
-*http://get.docker.io/                       | packages
- https://github.com/dotcloud/docker          | repository
-*https://hub.docker.com/                     | Docker Hub
- http://registry-1.docker.io/                | registry (push)
- http://staging-docker-ci.dotcloud.com/      | ci
-*http://test.docker.io/                      | packages
-*http://www.docker.com/                      | website
- http://? (internal URL, not for public use) | stackbrew
-
-*Ordered-by: lexicographic*
-
-**Note:** an asterisk in front of the URL means that it is cached by CloudFlare.
-
-
-## Services
-
-This should be the list of all services referenced above.
-
-Service             | Maintainer(s)              | How to update    | Source
---------------------|----------------------------|------------------|-------
-blog                | [@jbarbier]                | dotcloud push    | https://github.com/dotcloud/blog.docker.io
-cdn                 | [@jpetazzo][] [@samalba][] | cloudflare panel | N/A
-ci                  | [@mzdaniel]                | See [docker-ci]  | See [docker-ci]
-docs                | [@metalivedev]             | github webhook   | docker repo
-docsproxy           | [@dhrp]                    | dotcloud push    | https://github.com/dotcloud/docker-docs-dotcloud-proxy
-index               | [@kencochrane]             | dotcloud push    | private
-packages            | [@jpetazzo]                | hack/release     | docker repo
-registry            | [@samalba]                 | dotcloud push    | https://github.com/dotcloud/docker-registry
-repository (github) | N/A                        | N/A              | N/A
-ssl (dotcloud)      | [@jpetazzo]                | dotcloud ops     | N/A
-ssl (cloudflare)    | [@jpetazzo]                | cloudflare panel | N/A
-stackbrew           | [@shin-]                   | manual           | https://github.com/dotcloud/stackbrew/stackbrew
-website             | [@dhrp]                    | dotcloud push    | https://github.com/dotcloud/www.docker.io
-
-*Ordered-by: lexicographic*
-
-
-[docker-ci]: docker-ci.rst
-[@dhrp]: https://github.com/dhrp
-[@jbarbier]: https://github.com/jbarbier
-[@jpetazzo]: https://github.com/jpetazzo
-[@kencochrane]: https://github.com/kencochrane
-[@metalivedev]: https://github.com/metalivedev
-[@mzdaniel]: https://github.com/mzdaniel
-[@samalba]: https://github.com/samalba
-[@shin-]: https://github.com/shin-
diff --git a/hack/install.sh b/hack/install.sh
index 43248cf..52f058e 100755
--- a/hack/install.sh
+++ b/hack/install.sh
@@ -2,7 +2,7 @@
 set -e
 #
 # This script is meant for quick & easy install via:
-#   'curl -sL https://get.docker.io/ | sh'
+#   'curl -sSL https://get.docker.io/ | sh'
 # or:
 #   'wget -qO- https://get.docker.io/ | sh'
 #
@@ -54,7 +54,7 @@
 
 curl=''
 if command_exists curl; then
-	curl='curl -sL'
+	curl='curl -sSL'
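+	# (-S makes curl show an error message on failure, even in silent mode)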
 elif command_exists wget; then
 	curl='wget -qO-'
 elif command_exists busybox && busybox --list-modules | grep -q wget; then
@@ -85,7 +85,7 @@
 		if command_exists docker && [ -e /var/run/docker.sock ]; then
 			(
 				set -x
-				$sh_c 'docker run busybox echo "Docker has been successfully installed!"'
+				$sh_c 'docker run hello-world'
 			) || true
 		fi
 		your_user=your-user
@@ -133,7 +133,7 @@
 		if [ -z "$curl" ]; then
 			apt_get_update
 			( set -x; $sh_c 'sleep 3; apt-get install -y -q curl' )
-			curl='curl -sL'
+			curl='curl -sSL'
 		fi
 		(
 			set -x
@@ -150,7 +150,7 @@
 		if command_exists docker && [ -e /var/run/docker.sock ]; then
 			(
 				set -x
-				$sh_c 'docker run busybox echo "Docker has been successfully installed!"'
+				$sh_c 'docker run hello-world'
 			) || true
 		fi
 		your_user=your-user
diff --git a/hack/make.sh b/hack/make.sh
index d5bcfcf..05e3310 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -6,7 +6,7 @@
 #
 # Requirements:
 # - The current directory should be a checkout of the docker source code
-#   (http://github.com/dotcloud/docker). Whatever version is checked out
+#   (http://github.com/docker/docker). Whatever version is checked out
 #   will be built.
 # - The VERSION file, at the root of the repository, should exist, and
 #   will be used as Docker binary version and package version.
@@ -23,9 +23,11 @@
 
 set -o pipefail
 
+export DOCKER_PKG='github.com/docker/docker'
+
 # We're a nice, sexy, little shell script, and people might try to run us;
 # but really, they shouldn't. We want to be in a container!
-if [ "$(pwd)" != '/go/src/github.com/dotcloud/docker' ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then
+if [ "$(pwd)" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then
 	{
 		echo "# WARNING! I don't seem to be running in the Docker container."
 		echo "# The result of this command might be an incorrect build, and will not be"
@@ -42,17 +44,17 @@
 DEFAULT_BUNDLES=(
 	validate-dco
 	validate-gofmt
-	
+
 	binary
-	
+
 	test-unit
 	test-integration
 	test-integration-cli
-	
+
 	dynbinary
 	dyntest-unit
 	dyntest-integration
-	
+
 	cover
 	cross
 	tgz
@@ -77,8 +79,8 @@
 
 if [ "$AUTO_GOPATH" ]; then
 	rm -rf .gopath
-	mkdir -p .gopath/src/github.com/dotcloud
-	ln -sf ../../../.. .gopath/src/github.com/dotcloud/docker
+	mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
+	ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
 	export GOPATH="$(pwd)/.gopath:$(pwd)/vendor"
 fi
 
@@ -88,11 +90,15 @@
 	exit 1
 fi
 
+if [ -z "$DOCKER_CLIENTONLY" ]; then
+	DOCKER_BUILDTAGS+=" daemon"
+fi
+
 # Use these flags when compiling the tests and final binary
 LDFLAGS='
 	-w
-	-X github.com/dotcloud/docker/dockerversion.GITCOMMIT "'$GITCOMMIT'"
-	-X github.com/dotcloud/docker/dockerversion.VERSION "'$VERSION'"
+	-X '$DOCKER_PKG'/dockerversion.GITCOMMIT "'$GITCOMMIT'"
+	-X '$DOCKER_PKG'/dockerversion.VERSION "'$VERSION'"
 '
 LDFLAGS_STATIC='-linkmode external'
 EXTLDFLAGS_STATIC='-static'
@@ -103,7 +109,7 @@
 EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files"
 LDFLAGS_STATIC_DOCKER="
 	$LDFLAGS_STATIC
-	-X github.com/dotcloud/docker/dockerversion.IAMSTATIC true
+	-X $DOCKER_PKG/dockerversion.IAMSTATIC true
 	-extldflags \"$EXTLDFLAGS_STATIC_DOCKER\"
 "
 
@@ -150,12 +156,37 @@
 		testcover=( -cover -coverprofile "$coverprofile" $coverpkg )
 	fi
 	(
-		echo '+ go test' $TESTFLAGS "github.com/dotcloud/docker${dir#.}"
+		echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
 		cd "$dir"
 		go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS
 	)
 }
 
+# Compile phase run by parallel in test-unit. No support for coverpkg
+go_compile_test_dir() {
+	dir=$1
+	out_file="$DEST/precompiled/$dir.test"
+	testcover=()
+	if [ "$HAVE_GO_TEST_COVER" ]; then
+		# if our current go install has -cover, we want to use it :)
+		mkdir -p "$DEST/coverprofiles"
+		coverprofile="docker${dir#.}"
+		coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
+		testcover=( -cover -coverprofile "$coverprofile" ) # missing $coverpkg
+	fi
+	if [ "$BUILDFLAGS_FILE" ]; then
+		readarray -t BUILDFLAGS < "$BUILDFLAGS_FILE"
+	fi
+	(
+		cd "$dir"
+		go test "${testcover[@]}" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS -c
+	)
+	[ $? -ne 0 ] && return 1
+	mkdir -p "$(dirname "$out_file")"
+	mv "$dir/$(basename "$dir").test" "$out_file"
+	echo "Precompiled: ${DOCKER_PKG}${dir#.}"
+}
+
 # This helper function walks the current directory looking for directories
 # holding certain files ($1 parameter), and prints their paths on standard
 # output, one per line.
diff --git a/hack/make/.ensure-scratch b/hack/make/.ensure-scratch
index 487e85a..9a9a43a 100644
--- a/hack/make/.ensure-scratch
+++ b/hack/make/.ensure-scratch
@@ -2,8 +2,8 @@
 
 if ! docker inspect scratch &> /dev/null; then
 	# let's build a "docker save" tarball for "scratch"
-	# see https://github.com/dotcloud/docker/pull/5262
-	# and also https://github.com/dotcloud/docker/issues/4242
+	# see https://github.com/docker/docker/pull/5262
+	# and also https://github.com/docker/docker/issues/4242
 	mkdir -p /docker-scratch
 	(
 		cd /docker-scratch
diff --git a/hack/make/.validate b/hack/make/.validate
index cf6be53..0228091 100644
--- a/hack/make/.validate
+++ b/hack/make/.validate
@@ -4,7 +4,7 @@
 	# this is kind of an expensive check, so let's not do this twice if we
 	# are running more than one validate bundlescript
 	
-	VALIDATE_REPO='https://github.com/dotcloud/docker.git'
+	VALIDATE_REPO='https://github.com/docker/docker.git'
 	VALIDATE_BRANCH='master'
 	
 	if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
diff --git a/hack/make/cross b/hack/make/cross
index 32fbbc3..6ca06f8 100644
--- a/hack/make/cross
+++ b/hack/make/cross
@@ -3,6 +3,12 @@
 
 DEST=$1
 
+# explicit list of os/arch combos that support being a daemon
+declare -A daemonSupporting
+daemonSupporting=(
+	[linux/amd64]=1
+)
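+# (platforms not listed here are built as simple clients in the loop below)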
+
 # if we have our linux/amd64 version compiled, let's symlink it in
 if [ -x "$DEST/../binary/docker-$VERSION" ]; then
 	mkdir -p "$DEST/linux/amd64"
@@ -18,7 +24,10 @@
 		mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
 		export GOOS=${platform%/*}
 		export GOARCH=${platform##*/}
-		export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms (TODO this might change someday)
+		if [ -z "${daemonSupporting[$platform]}" ]; then
+			export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms
+			export BUILDFLAGS=( "${BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
+		fi
 		source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform"
 	)
 done
diff --git a/hack/make/dynbinary b/hack/make/dynbinary
index 74bb0dd..5064a79 100644
--- a/hack/make/dynbinary
+++ b/hack/make/dynbinary
@@ -39,7 +39,7 @@
 # exported so that "dyntest" can easily access it later without recalculating it
 
 (
-	export LDFLAGS_STATIC_DOCKER="-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/dockerversion.INITPATH \"$DOCKER_INITPATH\""
+	export LDFLAGS_STATIC_DOCKER="-X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X $DOCKER_PKG/dockerversion.INITPATH \"$DOCKER_INITPATH\""
 	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
 	source "$(dirname "$BASH_SOURCE")/binary"
 )
diff --git a/hack/make/dyntest-integration b/hack/make/dyntest-integration
index 03d7cbe..1cc7349 100644
--- a/hack/make/dyntest-integration
+++ b/hack/make/dyntest-integration
@@ -12,7 +12,7 @@
 (
 	export TEST_DOCKERINIT_PATH="$INIT"
 	export LDFLAGS_STATIC_DOCKER="
-		-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"
+		-X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"
 	"
 	source "$(dirname "$BASH_SOURCE")/test-integration"
 )
diff --git a/hack/make/dyntest-unit b/hack/make/dyntest-unit
index ce934f1..cffef98 100644
--- a/hack/make/dyntest-unit
+++ b/hack/make/dyntest-unit
@@ -12,7 +12,7 @@
 (
 	export TEST_DOCKERINIT_PATH="$INIT"
 	export LDFLAGS_STATIC_DOCKER="
-		-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"
+		-X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"
 	"
 	source "$(dirname "$BASH_SOURCE")/test-unit"
 )
diff --git a/hack/make/test-integration b/hack/make/test-integration
index baad134..b49ae59 100644
--- a/hack/make/test-integration
+++ b/hack/make/test-integration
@@ -5,7 +5,7 @@
 
 bundle_test_integration() {
 	LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir ./integration \
-		"-coverpkg $(find_dirs '*.go' | sed 's,^\.,github.com/dotcloud/docker,g' | paste -d, -s)"
+		"-coverpkg $(find_dirs '*.go' | sed 's,^\.,'$DOCKER_PKG',g' | paste -d, -s)"
 }
 
 # this "grep" hides some really irritating warnings that "go test -coverpkg"
diff --git a/hack/make/test-unit b/hack/make/test-unit
index 552810f..03172f9 100644
--- a/hack/make/test-unit
+++ b/hack/make/test-unit
@@ -2,6 +2,7 @@
 set -e
 
 DEST=$1
+: ${PARALLEL_JOBS:=$(nproc)}
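+# (may be overridden from the environment; defaults to the number of CPUs)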
 
 RED=$'\033[31m'
 GREEN=$'\033[32m'
@@ -22,35 +23,63 @@
 			TESTDIRS=$(find_dirs '*_test.go')
 		fi
 
-		TESTS_FAILED=()
-		for test_dir in $TESTDIRS; do
-			echo
+		if command -v parallel &> /dev/null; then (
+			# accommodate parallel so it can access these variables
+			export SHELL="$BASH"
+			export HOME="$(mktemp -d)"
+			mkdir -p "$HOME/.parallel"
+			touch "$HOME/.parallel/ignored_vars"
+			export -f go_compile_test_dir
+			export LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER"
+			export TESTFLAGS
+			export HAVE_GO_TEST_COVER
+			export DEST
+			# some hack to export array variables
+			export BUILDFLAGS_FILE="$HOME/buildflags_file"
+			( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE"
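+			# (go_compile_test_dir reads this file back with readarray to
+			# reconstruct the BUILDFLAGS array inside each parallel job)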
 
-			if ! LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir "$test_dir"; then
-				TESTS_FAILED+=("$test_dir")
-				echo
-				echo "${RED}Tests failed: $test_dir${TEXTRESET}"
-				sleep 1 # give it a second, so observers watching can take note
-			fi
-		done
-
-		echo
-		echo
-		echo
-
-		# if some tests fail, we want the bundlescript to fail, but we want to
-		# try running ALL the tests first, hence TESTS_FAILED
-		if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
-			echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}"
-			echo
-			false
-		else
-			echo "${GREEN}Test success${TEXTRESET}"
-			echo
-			true
+			echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --halt 2 --env _ go_compile_test_dir
+			rm -rf "$HOME"
+		) else
+			# aww, no "parallel" available - fall back to boring
+			for test_dir in $TESTDIRS; do
+				go_compile_test_dir "$test_dir"
+			done
 		fi
+		echo "$TESTDIRS" | go_run_test_dir
 	}
 }
 
+go_run_test_dir() {
+	TESTS_FAILED=()
+	while read dir; do
+		echo
+		echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
+		precompiled="$DEST/precompiled/$dir.test"
+		if ! ( cd "$dir" && "$precompiled" ); then
+			TESTS_FAILED+=("$dir")
+			echo
+			echo "${RED}Tests failed: $dir${TEXTRESET}"
+			sleep 1 # give it a second, so observers watching can take note
+		fi
+	done
+
+	echo
+	echo
+	echo
+
+	# if some tests fail, we want the bundlescript to fail, but we want to
+	# try running ALL the tests first, hence TESTS_FAILED
+	if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
+		echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}"
+		echo
+		false
+	else
+		echo "${GREEN}Test success${TEXTRESET}"
+		echo
+		true
+	fi
+}
+
 exec > >(tee -a $DEST/test.log) 2>&1
 bundle_test_unit
diff --git a/hack/make/ubuntu b/hack/make/ubuntu
index 0d19d75..98ec423 100644
--- a/hack/make/ubuntu
+++ b/hack/make/ubuntu
@@ -36,7 +36,7 @@
 	mkdir -p $DIR/etc/default
 	cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker
 	mkdir -p $DIR/lib/systemd/system
-	cp contrib/init/systemd/docker.service $DIR/lib/systemd/system/
+	cp contrib/init/systemd/docker.{service,socket} $DIR/lib/systemd/system/
 
 	# Include contributed completions
 	mkdir -p $DIR/etc/bash_completion.d
diff --git a/hack/make/validate-dco b/hack/make/validate-dco
index 6dbbe22..1c75d91 100644
--- a/hack/make/validate-dco
+++ b/hack/make/validate-dco
@@ -9,13 +9,22 @@
 : ${adds:=0}
 : ${dels:=0}
 
+# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash"
+githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+'
+
+# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
+dcoPrefix='Signed-off-by:'
+dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$"
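+# for example (hypothetical author), each of these satisfies $dcoRegex:
+#   Signed-off-by: Jane Doe <jane.doe@example.com>
+#   Signed-off-by: Jane Doe <jane.doe@example.com> (github: janedoe)
+#   Docker-DCO-1.1-Signed-off-by: Jane Doe <jane.doe@example.com> (github: janedoe)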
+
+check_dco() {
+	grep -qE "$dcoRegex"
+}
+
 if [ $adds -eq 0 -a $dels -eq 0 ]; then
 	echo '0 adds, 0 deletions; nothing to validate! :)'
 elif [ -z "$notDocs" -a $adds -le 1 -a $dels -le 1 ]; then
 	echo 'Congratulations!  DCO small-patch-exception material!'
 else
-	dcoPrefix='Docker-DCO-1.1-Signed-off-by:'
-	dcoRegex="^$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)> \\(github: (\S+)\\)$"
 	commits=( $(validate_log --format='format:%H%n') )
 	badCommits=()
 	for commit in "${commits[@]}"; do
@@ -23,7 +32,7 @@
 			# no content (ie, Merge commit, etc)
 			continue
 		fi
-		if ! git log -1 --format='format:%B' "$commit" | grep -qE "$dcoRegex"; then
+		if ! git log -1 --format='format:%B' "$commit" | check_dco; then
 			badCommits+=( "$commit" )
 		fi
 	done
@@ -39,7 +48,7 @@
 			echo 'Please amend each commit to include a properly formatted DCO marker.'
 			echo
 			echo 'Visit the following URL for information about the Docker DCO:'
-			echo ' https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work'
+			echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work'
 			echo
 		} >&2
 		false
diff --git a/hack/release.sh b/hack/release.sh
index 8642a4e..436c287 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -41,8 +41,8 @@
 [ "$AWS_ACCESS_KEY" ] || usage
 [ "$AWS_SECRET_KEY" ] || usage
 [ "$GPG_PASSPHRASE" ] || usage
-[ -d /go/src/github.com/dotcloud/docker ] || usage
-cd /go/src/github.com/dotcloud/docker
+[ -d /go/src/github.com/docker/docker ] || usage
+cd /go/src/github.com/docker/docker
 [ -x hack/make.sh ] || usage
 
 RELEASE_BUNDLES=(
@@ -282,12 +282,16 @@
 	apt-get update
 	apt-get install -y apt-transport-https
 fi
+
 # Add the repository to your APT sources
 echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
+
 # Then import the repository key
 apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+
 # Install docker
-apt-get update ; apt-get install -y lxc-docker
+apt-get update
+apt-get install -y lxc-docker
 
 #
 # Alternatively, just use the curl-able install.sh script provided at $(s3_url)
@@ -318,7 +322,7 @@
 
 	cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
 # To install, run the following command as root:
-curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
+curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
 # Then start docker in daemon mode:
 sudo /usr/local/bin/docker -d
 EOF
@@ -354,7 +358,7 @@
 Key-Length: 4096
 Passphrase: $GPG_PASSPHRASE
 Name-Real: Docker Release Tool
-Name-Email: docker@dotcloud.com
+Name-Email: docker@docker.com
 Name-Comment: releasedocker
 Expire-Date: 0
 %commit
diff --git a/hack/vendor.sh b/hack/vendor.sh
index 2ee530a..7af22ea 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -45,22 +45,22 @@
 
 clone git github.com/gorilla/mux 136d54f81f
 
-clone git github.com/syndtr/gocapability 3c85049eae
-
 clone git github.com/tchap/go-patricia v1.0.1
 
 clone hg code.google.com/p/go.net 84a4013f96e0
 
 clone hg code.google.com/p/gosqlite 74691fb6f837
 
-# get Go tip's archive/tar, for xattr support
-# TODO after Go 1.3 drops, bump our minimum supported version and drop this vendored dep
-clone hg code.google.com/p/go 3458ba248590
+# get Go tip's archive/tar, for xattr support and improved performance
+# TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep
+clone hg code.google.com/p/go 1b17b3426e3c
 mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar
 rm -rf src/code.google.com/p/go
 mkdir -p src/code.google.com/p/go/src/pkg/archive
 mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
 
-clone git github.com/godbus/dbus v1
-clone git github.com/coreos/go-systemd v2
-clone git github.com/docker/libcontainer 53cfe0a1eba9145bf5329abbb52b0072ccab8a00
+clone git github.com/docker/libcontainer db65c35051d05f3fb218a0e84a11267e0894fe0a
+# see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file)
+rm -rf src/github.com/docker/libcontainer/vendor
+eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli')"
+# we exclude "github.com/codegangsta/cli" here because it's only needed for "nsinit", which Docker doesn't include
diff --git a/image/graph.go b/image/graph.go
index 64a38d7..31fbdd9 100644
--- a/image/graph.go
+++ b/image/graph.go
@@ -1,7 +1,7 @@
 package image
 
 import (
-	"github.com/dotcloud/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver"
 )
 
 type Graph interface {
diff --git a/image/image.go b/image/image.go
index 5c25094..702782c 100644
--- a/image/image.go
+++ b/image/image.go
@@ -3,17 +3,24 @@
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
 	"time"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
+// Set the max depth to the aufs default that most
+// kernels are compiled with
+// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk
+const MaxImageDepth = 127
+
 type Image struct {
 	ID              string            `json:"id"`
 	Parent          string            `json:"parent,omitempty"`
@@ -87,11 +94,11 @@
 			}
 		} else {
 			start := time.Now().UTC()
-			utils.Debugf("Start untar layer")
+			log.Debugf("Start untar layer")
 			if err := archive.ApplyLayer(layer, layerData); err != nil {
 				return err
 			}
-			utils.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
+			log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
 
 			if img.Parent == "" {
 				if size, err = utils.TreeSize(layer); err != nil {
@@ -295,11 +302,27 @@
 	return count, nil
 }
 
+// CheckDepth returns an error if the depth of an image, as returned
+// by ImageDepth, is too large to support creating a container from it
+// on this daemon.
+func (img *Image) CheckDepth() error {
+	// We add 2 layers to the depth because the container's rw and
+	// init layer add to the restriction
+	depth, err := img.Depth()
+	if err != nil {
+		return err
+	}
+	if depth+2 >= MaxImageDepth {
+		return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
+	}
+	return nil
+}
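+
+// For example, a caller about to create a container from img would typically
+// gate on this first (an illustrative sketch, not the daemon's exact code):
+//
+//	if err := img.CheckDepth(); err != nil {
+//		return err
+//	}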
+
 // Build an Image object from raw json data
 func NewImgJSON(src []byte) (*Image, error) {
 	ret := &Image{}
 
-	utils.Debugf("Json string: {%s}", src)
+	log.Debugf("Json string: {%s}", src)
 	// FIXME: Is there a cleaner way to "purify" the input json?
 	if err := json.Unmarshal(src, ret); err != nil {
 		return nil, err
diff --git a/integration-cli/build_tests/TestBuildAddTar/1/Dockerfile b/integration-cli/build_tests/TestBuildAddTar/1/Dockerfile
deleted file mode 100644
index 2091b0e..0000000
--- a/integration-cli/build_tests/TestBuildAddTar/1/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM busybox
-ADD test.tar /test.tar
-RUN cat /test.tar/test/foo
diff --git a/integration-cli/build_tests/TestBuildAddTar/1/test.tar b/integration-cli/build_tests/TestBuildAddTar/1/test.tar
deleted file mode 100644
index 33639c6..0000000
--- a/integration-cli/build_tests/TestBuildAddTar/1/test.tar
+++ /dev/null
Binary files differ
diff --git a/integration-cli/build_tests/TestBuildAddTar/2/Dockerfile b/integration-cli/build_tests/TestBuildAddTar/2/Dockerfile
deleted file mode 100644
index 830e9dd..0000000
--- a/integration-cli/build_tests/TestBuildAddTar/2/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM busybox
-ADD test.tar /
-RUN cat /test/foo
diff --git a/integration-cli/build_tests/TestBuildAddTar/2/test.tar b/integration-cli/build_tests/TestBuildAddTar/2/test.tar
deleted file mode 100644
index 33639c6..0000000
--- a/integration-cli/build_tests/TestBuildAddTar/2/test.tar
+++ /dev/null
Binary files differ
diff --git a/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile b/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile
deleted file mode 100644
index 7287771..0000000
--- a/integration-cli/build_tests/TestBuildCacheADD/1/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM busybox
-ADD https://index.docker.io/robots.txt /
diff --git a/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile b/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile
deleted file mode 100644
index afe79b8..0000000
--- a/integration-cli/build_tests/TestBuildCacheADD/2/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM busybox
-ADD http://example.com/index.html /
diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/.dockerignore b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/.dockerignore
new file mode 100644
index 0000000..fb1fad8
--- /dev/null
+++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/.dockerignore
@@ -0,0 +1 @@
+directoryWeCantStat
diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/Dockerfile b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/Dockerfile
new file mode 100644
index 0000000..0964b8e
--- /dev/null
+++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox
+ADD . /foo/
diff --git a/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/directoryWeCantStat/bar b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/directoryWeCantStat/bar
new file mode 100644
index 0000000..257cc56
--- /dev/null
+++ b/integration-cli/build_tests/TestBuildWithInaccessibleFilesInContext/ignoredinaccessible/directoryWeCantStat/bar
@@ -0,0 +1 @@
+foo
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 039423e..bcff199 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -1,8 +1,9 @@
 package main
 
 import (
+	"archive/tar"
 	"fmt"
-
+	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -10,45 +11,43 @@
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/archive"
+	"github.com/docker/docker/archive"
 )
 
 func TestBuildCacheADD(t *testing.T) {
-	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildCacheADD", "1")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcacheadd1", ".")
-	buildCmd.Dir = buildDirectory
-	exitCode, err := runCommand(buildCmd)
-	errorOut(err, t, fmt.Sprintf("build failed to complete: %v", err))
-
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to build the image")
+	name := "testbuildtwoimageswithadd"
+	defer deleteImages(name)
+	server, err := fakeStorage(map[string]string{
+		"robots.txt": "hello",
+		"index.html": "world",
+	})
+	if err != nil {
+		t.Fatal(err)
 	}
-
-	buildDirectory = filepath.Join(workingDirectory, "build_tests", "TestBuildCacheADD", "2")
-	buildCmd = exec.Command(dockerBinary, "build", "-t", "testcacheadd2", ".")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
-	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
-
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to build the image")
+	defer server.Close()
+	if _, err := buildImage(name,
+		fmt.Sprintf(`FROM scratch
+		ADD %s/robots.txt /`, server.URL),
+		true); err != nil {
+		t.Fatal(err)
 	}
-
+	out, _, err := buildImageWithOut(name,
+		fmt.Sprintf(`FROM scratch
+		ADD %s/index.html /`, server.URL),
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
 	if strings.Contains(out, "Using cache") {
 		t.Fatal("2nd build used cache on ADD, it shouldn't")
 	}
 
-	deleteImages("testcacheadd1")
-	deleteImages("testcacheadd2")
-
-	logDone("build - build two images with ADD")
+	logDone("build - build two images with remote ADD")
 }
 
 func TestBuildSixtySteps(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildSixtySteps")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "foobuildsixtysteps", ".")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "foobuildsixtysteps", ".")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -67,9 +66,7 @@
 		t.Fatal(err)
 	}
 	f.Close()
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", ".")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", ".")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -119,9 +116,7 @@
 
 func TestAddSingleFileToExistDir(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "SingleFileToExistDir")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "SingleFileToExistDir")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -135,9 +130,7 @@
 
 func TestAddSingleFileToNonExistDir(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "SingleFileToNonExistDir")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "SingleFileToNonExistDir")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -151,9 +144,7 @@
 
 func TestAddDirContentToRoot(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "DirContentToRoot")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "DirContentToRoot")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -167,9 +158,7 @@
 
 func TestAddDirContentToExistDir(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "DirContentToExistDir")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "DirContentToExistDir")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -192,9 +181,7 @@
 		t.Fatal(err)
 	}
 	f.Close()
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", ".")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", ".")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -208,9 +195,7 @@
 
 func TestAddEtcToRoot(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestAdd")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testaddimg", "EtcToRoot")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testaddimg", "EtcToRoot")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -228,9 +213,7 @@
 		t.Fatal(err)
 	}
 	f.Close()
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", ".")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -280,9 +263,7 @@
 
 func TestCopySingleFileToExistDir(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "SingleFileToExistDir")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToExistDir")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -296,9 +277,7 @@
 
 func TestCopySingleFileToNonExistDir(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "SingleFileToNonExistDir")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "SingleFileToNonExistDir")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -312,9 +291,7 @@
 
 func TestCopyDirContentToRoot(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DirContentToRoot")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToRoot")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -328,9 +305,7 @@
 
 func TestCopyDirContentToExistDir(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "DirContentToExistDir")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "DirContentToExistDir")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -353,9 +328,7 @@
 		t.Fatal(err)
 	}
 	f.Close()
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", ".")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", ".")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -369,9 +342,7 @@
 
 func TestCopyEtcToRoot(t *testing.T) {
 	buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestCopy")
-	buildCmd := exec.Command(dockerBinary, "build", "-t", "testcopyimg", "EtcToRoot")
-	buildCmd.Dir = buildDirectory
-	out, exitCode, err := runCommandWithOutput(buildCmd)
+	out, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testcopyimg", "EtcToRoot")
 	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
 
 	if err != nil || exitCode != 0 {
@@ -464,9 +435,7 @@
 		// This test doesn't require that we run commands as an unprivileged user
 		pathToDirectoryWhichContainsLinks := filepath.Join(buildDirectory, "linksdirectory")
 
-		buildCmd := exec.Command(dockerBinary, "build", "-t", "testlinksok", ".")
-		buildCmd.Dir = pathToDirectoryWhichContainsLinks
-		out, exitCode, err := runCommandWithOutput(buildCmd)
+		out, exitCode, err := dockerCmdInDir(t, pathToDirectoryWhichContainsLinks, "build", "-t", "testlinksok", ".")
 		if err != nil || exitCode != 0 {
 			t.Fatalf("build should have worked: %s %s", err, out)
 		}
@@ -474,9 +443,32 @@
 		deleteImages("testlinksok")
 
 	}
+	{
+		// This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern
+		pathToInaccessibleDirectoryBuildDirectory := filepath.Join(buildDirectory, "ignoredinaccessible")
+		pathToDirectoryWithoutReadAccess := filepath.Join(pathToInaccessibleDirectoryBuildDirectory, "directoryWeCantStat")
+		pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
+		err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0)
+		errorOut(err, t, fmt.Sprintf("failed to chown directory to root: %s", err))
+		err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444)
+		errorOut(err, t, fmt.Sprintf("failed to chmod directory to 755: %s", err))
+		err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700)
+		errorOut(err, t, fmt.Sprintf("failed to chmod file to 444: %s", err))
+
+		buildCommandStatement := fmt.Sprintf("%s build -t ignoredinaccessible .", dockerBinary)
+		buildCmd := exec.Command("su", "unprivilegeduser", "-c", buildCommandStatement)
+		buildCmd.Dir = pathToInaccessibleDirectoryBuildDirectory
+		out, exitCode, err := runCommandWithOutput(buildCmd)
+		if err != nil || exitCode != 0 {
+			t.Fatalf("build should have worked: %s %s", err, out)
+		}
+		deleteImages("ignoredinaccessible")
+
+	}
 	deleteImages("inaccessiblefiles")
 	logDone("build - ADD from context with inaccessible files must fail")
 	logDone("build - ADD from context with accessible links must work")
+	logDone("build - ADD from context with ignored inaccessible files must work")
 }
 
 func TestBuildForceRm(t *testing.T) {
@@ -514,9 +506,7 @@
 		}
 
 		buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm")
-		buildCmd := exec.Command(dockerBinary, "build", "--rm", "-t", "testbuildrm", ".")
-		buildCmd.Dir = buildDirectory
-		_, exitCode, err := runCommandWithOutput(buildCmd)
+		_, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "--rm", "-t", "testbuildrm", ".")
 
 		if err != nil || exitCode != 0 {
 			t.Fatal("failed to build the image")
@@ -540,9 +530,7 @@
 		}
 
 		buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm")
-		buildCmd := exec.Command(dockerBinary, "build", "-t", "testbuildrm", ".")
-		buildCmd.Dir = buildDirectory
-		_, exitCode, err := runCommandWithOutput(buildCmd)
+		_, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "-t", "testbuildrm", ".")
 
 		if err != nil || exitCode != 0 {
 			t.Fatal("failed to build the image")
@@ -566,9 +554,7 @@
 		}
 
 		buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildRm")
-		buildCmd := exec.Command(dockerBinary, "build", "--rm=false", "-t", "testbuildrm", ".")
-		buildCmd.Dir = buildDirectory
-		_, exitCode, err := runCommandWithOutput(buildCmd)
+		_, exitCode, err := dockerCmdInDir(t, buildDirectory, "build", "--rm=false", "-t", "testbuildrm", ".")
 
 		if err != nil || exitCode != 0 {
 			t.Fatal("failed to build the image")
@@ -592,8 +578,12 @@
 }
 
 func TestBuildWithVolumes(t *testing.T) {
-	name := "testbuildvolumes"
-	expected := "map[/test1:map[] /test2:map[]]"
+	var (
+		result   map[string]map[string]struct{}
+		name     = "testbuildvolumes"
+		emptyMap = make(map[string]struct{})
+		expected = map[string]map[string]struct{}{"/test1": emptyMap, "/test2": emptyMap}
+	)
 	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM scratch
@@ -603,13 +593,22 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	res, err := inspectField(name, "Config.Volumes")
+	res, err := inspectFieldJSON(name, "Config.Volumes")
 	if err != nil {
 		t.Fatal(err)
 	}
-	if res != expected {
-		t.Fatalf("Volumes %s, expected %s", res, expected)
+
+	err = unmarshalJSON([]byte(res), &result)
+	if err != nil {
+		t.Fatal(err)
 	}
+
+	equal := deepEqual(&expected, &result)
+
+	if !equal {
+		t.Fatalf("Volumes %s, expected %s", result, expected)
+	}
+
 	logDone("build - with volumes")
 }
 
@@ -686,7 +685,7 @@
 
 func TestBuildEnv(t *testing.T) {
 	name := "testbuildenv"
-	expected := "[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
+	expected := "[PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
 	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
@@ -771,52 +770,53 @@
 
 // #6445 ensure ONBUILD triggers aren't committed to grandchildren
 func TestBuildOnBuildLimitedInheritence(t *testing.T) {
-	name1 := "testonbuildtrigger1"
-	dockerfile1 := `
+	var (
+		out2, out3 string
+	)
+	{
+		name1 := "testonbuildtrigger1"
+		dockerfile1 := `
 		FROM busybox
 		RUN echo "GRANDPARENT"
 		ONBUILD RUN echo "ONBUILD PARENT"
-	`
-	ctx1, err := fakeContext(dockerfile1, nil)
-	if err != nil {
-		t.Fatal(err)
+		`
+		ctx, err := fakeContext(dockerfile1, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".")
+		errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out1, err))
+		defer deleteImages(name1)
 	}
-
-	buildCmd := exec.Command(dockerBinary, "build", "-t", name1, ".")
-	buildCmd.Dir = ctx1.Dir
-	out1, _, err := runCommandWithOutput(buildCmd)
-	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out1, err))
-	defer deleteImages(name1)
-
-	name2 := "testonbuildtrigger2"
-	dockerfile2 := `
+	{
+		name2 := "testonbuildtrigger2"
+		dockerfile2 := `
 		FROM testonbuildtrigger1
-	`
-	ctx2, err := fakeContext(dockerfile2, nil)
-	if err != nil {
-		t.Fatal(err)
+		`
+		ctx, err := fakeContext(dockerfile2, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".")
+		errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out2, err))
+		defer deleteImages(name2)
 	}
-
-	buildCmd = exec.Command(dockerBinary, "build", "-t", name2, ".")
-	buildCmd.Dir = ctx2.Dir
-	out2, _, err := runCommandWithOutput(buildCmd)
-	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out2, err))
-	defer deleteImages(name2)
-
-	name3 := "testonbuildtrigger3"
-	dockerfile3 := `
+	{
+		name3 := "testonbuildtrigger3"
+		dockerfile3 := `
 		FROM testonbuildtrigger2
-	`
-	ctx3, err := fakeContext(dockerfile3, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+		`
+		ctx, err := fakeContext(dockerfile3, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-	buildCmd = exec.Command(dockerBinary, "build", "-t", name3, ".")
-	buildCmd.Dir = ctx3.Dir
-	out3, _, err := runCommandWithOutput(buildCmd)
-	errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out3, err))
-	defer deleteImages(name3)
+		out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".")
+		errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out3, err))
+		defer deleteImages(name3)
+	}
 
 	// ONBUILD should be run in second build.
 	if !strings.Contains(out2, "ONBUILD PARENT") {
@@ -1273,14 +1273,14 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	// Cmd inherited from busybox, maybe will be fixed in #5147
-	if expected := "[/bin/sh]"; res != expected {
+	// Cmd must be cleaned up
+	if expected := "<no value>"; res != expected {
 		t.Fatalf("Cmd %s, expected %s", res, expected)
 	}
 	logDone("build - cleanup cmd after RUN")
 }
 
-func TestBuldForbiddenContextPath(t *testing.T) {
+func TestBuildForbiddenContextPath(t *testing.T) {
 	name := "testbuildforbidpath"
 	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
@@ -1290,18 +1290,16 @@
 			"test.txt":  "test1",
 			"other.txt": "other",
 		})
-
 	defer ctx.Close()
 	if err != nil {
 		t.Fatal(err)
 	}
-	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		if !strings.Contains(err.Error(), "Forbidden path outside the build context: ../../ (/)") {
-			t.Fatal("Wrong error, must be about forbidden ../../ path")
-		}
-	} else {
-		t.Fatal("Error must not be nil")
+
+	expected := "Forbidden path outside the build context: ../../ "
+	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
+		t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err)
 	}
+
 	logDone("build - forbidden context path")
 }
 
@@ -1565,6 +1563,29 @@
 	logDone("build - test .dockerignore of Dockerfile")
 }
 
+func TestDockerignoringWholeDir(t *testing.T) {
+	name := "testbuilddockerignorewholedir"
+	defer deleteImages(name)
+	dockerfile := `
+		FROM busybox
+		COPY . /
+		RUN [[ ! -e /.gitignore ]]
+		RUN [[ -f /Makefile ]]`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"Dockerfile":    "FROM scratch",
+		"Makefile":      "all:",
+		".dockerignore": ".*\n",
+	})
+	defer ctx.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err = buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatal(err)
+	}
+	logDone("build - test .dockerignore whole dir with .*")
+}
+
 func TestBuildLineBreak(t *testing.T) {
 	name := "testbuildlinebreak"
 	defer deleteImages(name)
@@ -1733,33 +1754,118 @@
 }
 
 func TestBuildAddTar(t *testing.T) {
+	name := "testbuildaddtar"
+	defer deleteImages(name)
 
-	checkOutput := func(out string) {
-		n := -1
-		x := ""
-		for i, line := range strings.Split(out, "\n") {
-			if strings.HasPrefix(line, "Step 2") {
-				n = i + 2
-				x = line[strings.Index(line, "cat ")+4:]
-			}
-			if i == n {
-				if line != "Hi" {
-					t.Fatalf("Could not find contents of %s (expected 'Hi' got '%s'", x, line)
-				}
-				n = -2
-			}
+	ctx := func() *FakeContext {
+		dockerfile := `
+FROM busybox
+ADD test.tar /
+RUN cat /test/foo | grep Hi
+ADD test.tar /test.tar
+RUN cat /test.tar/test/foo | grep Hi
+ADD test.tar /unlikely-to-exist
+RUN cat /unlikely-to-exist/test/foo | grep Hi
+ADD test.tar /unlikely-to-exist-trailing-slash/
+RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi
+RUN mkdir /existing-directory
+ADD test.tar /existing-directory
+RUN cat /existing-directory/test/foo | grep Hi
+ADD test.tar /existing-directory-trailing-slash/
+RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
+		tmpDir, err := ioutil.TempDir("", "fake-context")
+		if err != nil {
+			t.Fatalf("failed to create test context directory: %v", err)
+		}
+		testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
+		if err != nil {
+			t.Fatalf("failed to create test.tar archive: %v", err)
 		}
-		if n > -2 {
-			t.Fatalf("Could not find contents of %s in build output", x)
+		defer testTar.Close()
+
+		tw := tar.NewWriter(testTar)
+
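+		// Write a minimal tar archive: a single entry test/foo whose content is "Hi".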
+		if err := tw.WriteHeader(&tar.Header{
+			Name: "test/foo",
+			Size: 2,
+		}); err != nil {
+			t.Fatalf("failed to write tar file header: %v", err)
 		}
+		if _, err := tw.Write([]byte("Hi")); err != nil {
+			t.Fatalf("failed to write tar file content: %v", err)
+		}
+		if err := tw.Close(); err != nil {
+			t.Fatalf("failed to close tar archive: %v", err)
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
+			t.Fatalf("failed to open destination dockerfile: %v", err)
+		}
+		return &FakeContext{Dir: tmpDir}
+	}()
+
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		t.Fatalf("build failed to complete for TestBuildAddTar: %v", err)
 	}
 
-	for _, n := range []string{"1", "2"} {
-		buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildAddTar", n)
-		buildCmd := exec.Command(dockerBinary, "build", "-t", "testbuildaddtar", ".")
-		buildCmd.Dir = buildDirectory
-		out, _, err := runCommandWithOutput(buildCmd)
-		errorOut(err, t, fmt.Sprintf("build failed to complete for TestBuildAddTar/%s: %v", n, err))
-		checkOutput(out)
+	logDone("build - ADD tar")
+}
+
+func TestBuildFromGIT(t *testing.T) {
+	name := "testbuildfromgit"
+	defer deleteImages(name)
+	git, err := fakeGIT("repo", map[string]string{
+		"Dockerfile": `FROM busybox
+					ADD first /first
+					RUN [ -f /first ]
+					MAINTAINER docker`,
+		"first": "test git data",
+	})
+	if err != nil {
+		t.Fatal(err)
 	}
+	defer git.Close()
+
+	_, err = buildImageFromPath(name, git.RepoURL, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	res, err := inspectField(name, "Author")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res != "docker" {
+		t.Fatalf("Maintainer should be docker, got %s", res)
+	}
+	logDone("build - build from GIT")
+}
+
+func TestBuildCleanupCmdOnEntrypoint(t *testing.T) {
+	name := "testbuildcmdcleanuponentrypoint"
+	defer deleteImages(name)
+	if _, err := buildImage(name,
+		`FROM scratch
+		CMD ["test"]
+		ENTRYPOINT ["echo"]`,
+		true); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := buildImage(name,
+		fmt.Sprintf(`FROM %s
+		ENTRYPOINT ["cat"]`, name),
+		true); err != nil {
+		t.Fatal(err)
+	}
+	res, err := inspectField(name, "Config.Cmd")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if expected := "<no value>"; res != expected {
+		t.Fatalf("Cmd %s, expected %s", res, expected)
+	}
+	res, err = inspectField(name, "Config.Entrypoint")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if expected := "[cat]"; res != expected {
+		t.Fatalf("Entrypoint %s, expected %s", res, expected)
+	}
+	logDone("build - cleanup cmd on ENTRYPOINT")
 }
diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go
index b9d184c..7fc2fa1 100644
--- a/integration-cli/docker_cli_events_test.go
+++ b/integration-cli/docker_cli_events_test.go
@@ -1,12 +1,15 @@
 package main
 
 import (
+	"fmt"
 	"os/exec"
+	"strconv"
 	"strings"
 	"testing"
+	"time"
 )
 
-func TestCLIGetEvents(t *testing.T) {
+func TestCLIGetEventsUntag(t *testing.T) {
 	out, _, _ := cmd(t, "images", "-q")
 	image := strings.Split(out, "\n")[0]
 	cmd(t, "tag", image, "utest:tag1")
@@ -27,3 +30,110 @@
 	}
 	logDone("events - untags are logged")
 }
+
+func TestCLIGetEventsPause(t *testing.T) {
+	out, _, _ := cmd(t, "images", "-q")
+	image := strings.Split(out, "\n")[0]
+	cmd(t, "run", "-d", "--name", "testeventpause", image, "sleep", "2")
+	cmd(t, "pause", "testeventpause")
+	cmd(t, "unpause", "testeventpause")
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()))
+	out, _, _ = runCommandWithOutput(eventsCmd)
+	events := strings.Split(out, "\n")
+	if len(events) <= 1 {
+		t.Fatalf("Missing expected event")
+	}
+
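+	// The output ends with a newline, so after the split the last two complete
+	// lines sit at len(events)-3 (pause) and len(events)-2 (unpause).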
+	pauseEvent := strings.Fields(events[len(events)-3])
+	unpauseEvent := strings.Fields(events[len(events)-2])
+
+	if pauseEvent[len(pauseEvent)-1] != "pause" {
+		t.Fatalf("event should be pause, not %#v", pauseEvent)
+	}
+	if unpauseEvent[len(unpauseEvent)-1] != "unpause" {
+		t.Fatalf("event should be pause, not %#v", unpauseEvent)
+	}
+
+	logDone("events - pause/unpause is logged")
+}
+
+func TestCLILimitEvents(t *testing.T) {
+	for i := 0; i < 30; i++ {
+		cmd(t, "run", "busybox", "echo", strconv.Itoa(i))
+	}
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()))
+	out, _, _ := runCommandWithOutput(eventsCmd)
+	events := strings.Split(out, "\n")
+	nEvents := len(events) - 1
+	if nEvents != 64 {
+		t.Fatalf("events should be limited to 64, but received %d", nEvents)
+	}
+	logDone("events - limited to 64 entries")
+}
+
+func TestCLIGetEventsContainerEvents(t *testing.T) {
+	cmd(t, "run", "--rm", "busybox", "true")
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()))
+	out, exitCode, err := runCommandWithOutput(eventsCmd)
+	if exitCode != 0 || err != nil {
+		t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err)
+	}
+	events := strings.Split(out, "\n")
+	events = events[:len(events)-1]
+	if len(events) < 4 {
+		t.Fatalf("Missing expected event")
+	}
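+	// With the trailing empty element dropped, the last four lines are the
+	// create, start, die and destroy events of the --rm'd container, in order.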
+	createEvent := strings.Fields(events[len(events)-4])
+	startEvent := strings.Fields(events[len(events)-3])
+	dieEvent := strings.Fields(events[len(events)-2])
+	destroyEvent := strings.Fields(events[len(events)-1])
+	if createEvent[len(createEvent)-1] != "create" {
+		t.Fatalf("event should be create, not %#v", createEvent)
+	}
+	if startEvent[len(startEvent)-1] != "start" {
+		t.Fatalf("event should be start, not %#v", startEvent)
+	}
+	if dieEvent[len(dieEvent)-1] != "die" {
+		t.Fatalf("event should be die, not %#v", dieEvent)
+	}
+	if destroyEvent[len(destroyEvent)-1] != "destroy" {
+		t.Fatalf("event should be destroy, not %#v", destroyEvent)
+	}
+
+	logDone("events - container create, start, die, destroy is logged")
+}
+
+func TestCLIGetEventsImageUntagDelete(t *testing.T) {
+	name := "testimageevents"
+	defer deleteImages(name)
+	_, err := buildImage(name,
+		`FROM scratch
+		MAINTAINER "docker"`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := deleteImages(name); err != nil {
+		t.Fatal(err)
+	}
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", time.Now().Unix()))
+	out, exitCode, err := runCommandWithOutput(eventsCmd)
+	if exitCode != 0 || err != nil {
+		t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err)
+	}
+	events := strings.Split(out, "\n")
+	t.Log(events)
+	events = events[:len(events)-1]
+	if len(events) < 2 {
+		t.Fatalf("Missing expected event")
+	}
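+	// Deleting the built image should have emitted an untag event followed by
+	// a delete event as the last two lines.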
+	untagEvent := strings.Fields(events[len(events)-2])
+	deleteEvent := strings.Fields(events[len(events)-1])
+	if untagEvent[len(untagEvent)-1] != "untag" {
+		t.Fatalf("event should be untag, not %#v", untagEvent)
+	}
+	if deleteEvent[len(deleteEvent)-1] != "delete" {
+		t.Fatalf("event should be delete, not %#v", deleteEvent)
+	}
+	logDone("events - image untag, delete is logged")
+}
diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go
index 9b36aa9..f9e86f0 100644
--- a/integration-cli/docker_cli_import_test.go
+++ b/integration-cli/docker_cli_import_test.go
@@ -12,8 +12,8 @@
 	out, _, err := runCommandWithOutput(importCmd)
 	errorOut(err, t, fmt.Sprintf("import failed with errors: %v", err))
 
-	if n := len(strings.Split(out, "\n")); n != 3 {
-		t.Fatalf("display is messed up: %d '\\n' instead of 3", n)
+	if n := strings.Count(out, "\n"); n != 2 {
+		t.Fatalf("display is messed up: %d '\\n' instead of 2", n)
 	}
 
 	logDone("import - cirros was imported and display is fine")
diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go
new file mode 100644
index 0000000..7e5dd69
--- /dev/null
+++ b/integration-cli/docker_cli_inspect_test.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+	"os/exec"
+	"strings"
+	"testing"
+)
+
+func TestInspectImage(t *testing.T) {
+	imageTest := "scratch"
+	imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
+	imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest)
+
+	out, exitCode, err := runCommandWithOutput(imagesCmd)
+	if exitCode != 0 || err != nil {
+		t.Fatalf("failed to inspect image: %s, %v", out, err)
+	}
+	if id := strings.TrimSuffix(out, "\n"); id != imageTestID {
+		t.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id)
+	}
+	logDone("inspect - inspect an image")
+}
diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go
index d261647..9cd1a3f 100644
--- a/integration-cli/docker_cli_links_test.go
+++ b/integration-cli/docker_cli_links_test.go
@@ -8,7 +8,7 @@
 	"strings"
 	"testing"
 
-	"github.com/dotcloud/docker/pkg/iptables"
+	"github.com/docker/docker/pkg/iptables"
 )
 
 func TestEtcHostsRegularFile(t *testing.T) {
@@ -93,31 +93,60 @@
 }
 
 func TestInspectLinksStarted(t *testing.T) {
+	var (
+		expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}}
+		result   []string
+	)
 	defer deleteAllContainers()
 	cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
 	cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
 	cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10")
-	links, err := inspectField("testinspectlink", "HostConfig.Links")
+	links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links")
 	if err != nil {
 		t.Fatal(err)
 	}
-	if expected := "[/container1:/testinspectlink/alias1 /container2:/testinspectlink/alias2]"; links != expected {
-		t.Fatalf("Links %s, but expected %s", links, expected)
+
+	err = unmarshalJSON([]byte(links), &result)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	output := convertSliceOfStringsToMap(result)
+
+	equal := deepEqual(expected, output)
+
+	if !equal {
+		t.Fatalf("Links %s, expected %s", result, expected)
 	}
 	logDone("link - links in started container inspect")
 }
 
 func TestInspectLinksStopped(t *testing.T) {
+	var (
+		expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}}
+		result   []string
+	)
 	defer deleteAllContainers()
 	cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
 	cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
 	cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true")
-	links, err := inspectField("testinspectlink", "HostConfig.Links")
+	links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links")
 	if err != nil {
 		t.Fatal(err)
 	}
-	if expected := "[/container1:/testinspectlink/alias1 /container2:/testinspectlink/alias2]"; links != expected {
-		t.Fatalf("Links %s, but expected %s", links, expected)
+
+	err = unmarshalJSON([]byte(links), &result)
+	if err != nil {
+		t.Fatal(err)
 	}
+
+	output := convertSliceOfStringsToMap(result)
+
+	equal := deepEqual(expected, output)
+
+	if !equal {
+		t.Fatalf("Links %s, but expected %s", result, expected)
+	}
+
 	logDone("link - links in stopped container inspect")
 }
diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go
index 8b1d006..b32b734 100644
--- a/integration-cli/docker_cli_logs_test.go
+++ b/integration-cli/docker_cli_logs_test.go
@@ -98,11 +98,11 @@
 		t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
 	}
 
-	ts := regexp.MustCompile(`^\[.*?\]`)
+	ts := regexp.MustCompile(`^.* `)
 
 	for _, l := range lines {
 		if l != "" {
-			_, err := time.Parse("["+time.StampMilli+"]", ts.FindString(l))
+			_, err := time.Parse(time.RFC3339Nano+" ", ts.FindString(l))
 			if err != nil {
 				t.Fatalf("Failed to parse timestamp from %v: %v", l, err)
 			}
diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go
index 3816e54..caa41aa 100644
--- a/integration-cli/docker_cli_nat_test.go
+++ b/integration-cli/docker_cli_nat_test.go
@@ -7,7 +7,7 @@
 	"os/exec"
 	"testing"
 
-	"github.com/dotcloud/docker/daemon"
+	"github.com/docker/docker/daemon"
 )
 
 func TestNetworkNat(t *testing.T) {
diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go
new file mode 100644
index 0000000..4ff3012
--- /dev/null
+++ b/integration-cli/docker_cli_ps_test.go
@@ -0,0 +1,201 @@
+package main
+
+import (
+	"os/exec"
+	"strings"
+	"testing"
+)
+
+func TestListContainers(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	firstID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	secondID := stripTrailingCharacters(out)
+
+	// not long running
+	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	thirdID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	fourthID := stripTrailingCharacters(out)
+
+	// make sure third one is not running
+	runCmd = exec.Command(dockerBinary, "wait", thirdID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	// all
+	runCmd = exec.Command(dockerBinary, "ps", "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// running
+	runCmd = exec.Command(dockerBinary, "ps")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, []string{fourthID, secondID, firstID}) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// from here on, the '-a' flag is effectively ignored: each filter is checked with and without it
+
+	// limit
+	runCmd = exec.Command(dockerBinary, "ps", "-n=2", "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected := []string{fourthID, thirdID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "-n=2")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// since
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected = []string{fourthID, thirdID, secondID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// before
+	runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID, "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected = []string{secondID, firstID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// since & before
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected = []string{thirdID, secondID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// since & limit
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2", "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected = []string{fourthID, thirdID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// before & limit
+	runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1", "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected = []string{thirdID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	// since & before & limit
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+	expected = []string{thirdID}
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1")
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if !assertContainerList(out, expected) {
+		t.Error("Container list is not in the correct order")
+	}
+
+	deleteAllContainers()
+
+	logDone("ps - test ps options")
+}
+
+func assertContainerList(out string, expected []string) bool {
+	lines := strings.Split(strings.Trim(out, "\n "), "\n")
+	if len(lines)-1 != len(expected) {
+		return false
+	}
+
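+	// Locate the CONTAINER ID column in the header row; every following row
+	// carries the 12-character truncated ID at the same offset.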
+	containerIDIndex := strings.Index(lines[0], "CONTAINER ID")
+	for i := 0; i < len(expected); i++ {
+		foundID := lines[i+1][containerIDIndex : containerIDIndex+12]
+		if foundID != expected[i][:12] {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go
new file mode 100644
index 0000000..946a7c7
--- /dev/null
+++ b/integration-cli/docker_cli_restart_test.go
@@ -0,0 +1,127 @@
+package main
+
+import (
+	"os/exec"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestDockerRestartStoppedContainer(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "foobar")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if out != "foobar\n" {
+		t.Errorf("container should've printed 'foobar'")
+	}
+
+	runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if out != "foobar\nfoobar\n" {
+		t.Errorf("container should've printed 'foobar' twice")
+	}
+
+	deleteAllContainers()
+
+	logDone("restart - echo foobar for stopped container")
+}
+
+func TestDockerRestartRunningContainer(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	time.Sleep(1 * time.Second)
+
+	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if out != "foobar\n" {
+		t.Errorf("container should've printed 'foobar'")
+	}
+
+	runCmd = exec.Command(dockerBinary, "restart", "-t", "1", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	time.Sleep(1 * time.Second)
+
+	if out != "foobar\nfoobar\n" {
+		t.Errorf("container should've printed 'foobar' twice")
+	}
+
+	deleteAllContainers()
+
+	logDone("restart - echo foobar for running container")
+}
+
+// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
+func TestDockerRestartWithVolumes(t *testing.T) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/test", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	cleanedContainerID := stripTrailingCharacters(out)
+
+	runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if out = strings.Trim(out, " \n\r"); out != "1" {
+		t.Errorf("expect 1 volume received %s", out)
+	}
+
+	runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID)
+	volumes, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, volumes)
+
+	runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID)
+	out, _, err = runCommandWithOutput(runCmd)
+	errorOut(err, t, out)
+
+	if out = strings.Trim(out, " \n\r"); out != "1" {
+		t.Errorf("expect 1 volume after restart received %s", out)
+	}
+
+	runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID)
+	volumesAfterRestart, _, err := runCommandWithOutput(runCmd)
+	errorOut(err, t, volumesAfterRestart)
+
+	if volumes != volumesAfterRestart {
+		volumes = strings.Trim(volumes, " \n\r")
+		volumesAfterRestart = strings.Trim(volumesAfterRestart, " \n\r")
+		t.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart)
+	}
+
+	deleteAllContainers()
+
+	logDone("restart - does not create a new volume on restart")
+}
diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go
index 34b5df0..0961e63 100644
--- a/integration-cli/docker_cli_rm_test.go
+++ b/integration-cli/docker_cli_rm_test.go
@@ -3,6 +3,7 @@
 import (
 	"os"
 	"os/exec"
+	"strings"
 	"testing"
 )
 
@@ -42,25 +43,100 @@
 	logDone("rm - volume")
 }
 
-func TestRemoveContainerRunning(t *testing.T) {
-	cmd := exec.Command(dockerBinary, "run", "-dt", "--name", "foo", "busybox", "top")
-	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
-	}
+func TestRemoveRunningContainer(t *testing.T) {
+	createRunningContainer(t, "foo")
 
 	// Test cannot remove running container
-	cmd = exec.Command(dockerBinary, "rm", "foo")
+	cmd := exec.Command(dockerBinary, "rm", "foo")
 	if _, err := runCommand(cmd); err == nil {
 		t.Fatalf("Expected error, can't rm a running container")
 	}
 
-	// Remove with -f
-	cmd = exec.Command(dockerBinary, "rm", "-f", "foo")
+	deleteAllContainers()
+
+	logDone("rm - running container")
+}
+
+func TestForceRemoveRunningContainer(t *testing.T) {
+	createRunningContainer(t, "foo")
+
+	// Force removal of the running container with -f
+	cmd := exec.Command(dockerBinary, "rm", "-f", "foo")
 	if _, err := runCommand(cmd); err != nil {
 		t.Fatal(err)
 	}
 
 	deleteAllContainers()
 
-	logDone("rm - running container")
+	logDone("rm - running container with --force=true")
+}
+
+func TestContainerOrphaning(t *testing.T) {
+	dockerfile1 := `FROM busybox:latest
+	ENTRYPOINT ["/bin/true"]`
+	img := "test-container-orphaning"
+	dockerfile2 := `FROM busybox:latest
+	ENTRYPOINT ["/bin/true"]
+	MAINTAINER Integration Tests`
+
+	// build first dockerfile
+	img1, err := buildImage(img, dockerfile1, true)
+	if err != nil {
+		t.Fatalf("Could not build image %s: %v", img, err)
+	}
+	// run container on first image
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", img)); err != nil {
+		t.Fatalf("Could not run image %s: %v: %s", img, err, out)
+	}
+	// rebuild dockerfile with a small addition at the end
+	if _, err := buildImage(img, dockerfile2, true); err != nil {
+		t.Fatalf("Could not rebuild image %s: %v", img, err)
+	}
+	// try to remove the image, should error out.
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", img)); err == nil {
+		t.Fatalf("Expected to error out removing the image, but succeeded: %s", out)
+	}
+	// check if we deleted the first image
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc"))
+	if err != nil {
+		t.Fatalf("%v: %s", err, out)
+	}
+	if !strings.Contains(out, img1) {
+		t.Fatalf("Orphaned container (could not find '%s' in docker images): %s", img1, out)
+	}
+
+	deleteAllContainers()
+
+	logDone("rm - container orphaning")
+}
+
+func TestDeleteTagWithExistingContainers(t *testing.T) {
+	container := "test-delete-tag"
+	newtag := "busybox:newtag"
+	bb := "busybox:latest"
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", bb, newtag)); err != nil {
+		t.Fatalf("Could not tag busybox: %v: %s", err, out)
+	}
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", container, bb, "/bin/true")); err != nil {
+		t.Fatalf("Could not run busybox: %v: %s", err, out)
+	}
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", newtag))
+	if err != nil {
+		t.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out)
+	}
+	if d := strings.Count(out, "Untagged: "); d != 1 {
+		t.Fatalf("Expected 1 untagged entry got %d: %q", d, out)
+	}
+
+	deleteAllContainers()
+
+	logDone("rm - delete tag with existing containers")
+}
+
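+// createRunningContainer starts a detached, long-running busybox container
+// (running top) under the given name.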
+func createRunningContainer(t *testing.T, name string) {
+	cmd := exec.Command(dockerBinary, "run", "-dt", "--name", name, "busybox", "top")
+	if _, err := runCommand(cmd); err != nil {
+		t.Fatal(err)
+	}
 }
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
index eda1b3f..b95fd86 100644
--- a/integration-cli/docker_cli_run_test.go
+++ b/integration-cli/docker_cli_run_test.go
@@ -1,15 +1,22 @@
 package main
 
 import (
+	"bufio"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"os/exec"
+	"path/filepath"
 	"reflect"
 	"regexp"
 	"sort"
 	"strings"
 	"sync"
 	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
 )
 
 // "test123" should be printed by docker run
@@ -654,7 +661,7 @@
 }
 
 func TestEnvironment(t *testing.T) {
-	cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "busybox", "env")
+	cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env")
 	cmd.Env = append(os.Environ(),
 		"TRUE=false",
 		"TRICKY=tri\ncky\n",
@@ -673,13 +680,13 @@
 
 	goodEnv := []string{
 		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-		"HOME=/",
 		"HOSTNAME=testing",
 		"FALSE=true",
 		"TRUE=false",
 		"TRICKY=tri",
 		"cky",
 		"",
+		"HOME=/root",
 	}
 	sort.Strings(goodEnv)
 	if len(goodEnv) != len(actualEnv) {
@@ -719,6 +726,20 @@
 	logDone("run - test container loopback when networking disabled")
 }
 
+func TestNetHostNotAllowedWithLinks(t *testing.T) {
+	dockerCmd(t, "run", "--name", "linked", "busybox", "true")
+
+	cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true")
+	_, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal("Expected error")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - don't allow --net=host to be used with links")
+}
+
 func TestLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) {
 	cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up")
 	out, _, err := runCommandWithOutput(cmd)
@@ -780,6 +801,131 @@
 	logDone("run - test un-privileged can mknod")
 }
 
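+// The capability tests below probe MKNOD via mknod and NET_ADMIN via
+// "ip link set eth0 down", in various --cap-add/--cap-drop combinations.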
+func TestCapDropInvalid(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-drop=CHPASS", "busybox", "ls")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatalf("expected docker run to fail on an invalid capability, got: %s", out)
+	}
+
+	logDone("run - test --cap-drop=CHPASS invalid")
+}
+
+func TestCapDropCannotMknod(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
+		t.Fatalf("expected output not ok received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --cap-drop=MKNOD cannot mknod")
+}
+
+func TestCapDropCannotMknodLowerCase(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
+		t.Fatalf("expected output not ok received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --cap-drop=mknod cannot mknod lowercase")
+}
+
+func TestCapDropALLCannotMknod(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
+		t.Fatalf("expected output not ok received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --cap-drop=ALL cannot mknod")
+}
+
+func TestCapDropALLAddMknodCannotMknod(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
+		t.Fatalf("expected output ok received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --cap-drop=ALL --cap-add=MKNOD can mknod")
+}
+
+func TestCapAddInvalid(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-add=CHPASS", "busybox", "ls")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatalf("expected docker run to fail on an invalid capability, got: %s", out)
+	}
+
+	logDone("run - test --cap-add=CHPASS invalid")
+}
+
+func TestCapAddCanDownInterface(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
+		t.Fatalf("expected output ok received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --cap-add=NET_ADMIN can set eth0 down")
+}
+
+func TestCapAddALLCanDownInterface(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
+		t.Fatalf("expected output ok received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --cap-add=ALL can set eth0 down")
+}
+
+func TestCapAddALLDropNetAdminCanDownInterface(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
+		t.Fatalf("expected output not ok received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --cap-add=ALL --cap-drop=NET_ADMIN cannot set eth0 down")
+}
+
 func TestPrivilegedCanMount(t *testing.T) {
 	cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
 
@@ -916,6 +1062,22 @@
 	logDone("run - unprivileged with chroot")
 }
 
+func TestAddingOptionalDevices(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" {
+		t.Fatalf("expected output /dev/nulo, received %s", actual)
+	}
+	deleteAllContainers()
+
+	logDone("run - test --device argument")
+}
+
 func TestModeHostname(t *testing.T) {
 	cmd := exec.Command(dockerBinary, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname")
 
@@ -983,3 +1145,488 @@
 
 	logDone("run - bind mount /:/ as volume should fail")
 }
+
+// Test recursive bind mount works by default
+func TestDockerRunWithVolumesIsRecursive(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer os.RemoveAll(tmpDir)
+
+	// Create a temporary tmpfs mount.
+	tmpfsDir := filepath.Join(tmpDir, "tmpfs")
+	if err := os.MkdirAll(tmpfsDir, 0777); err != nil {
+		t.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err)
+	}
+	if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil {
+		t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err)
+	}
+
+	f, err := ioutil.TempFile(tmpfsDir, "touch-me")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+
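+	// If the bind mount is recursive, the file created inside the nested
+	// tmpfs mount is visible at /tmp/tmpfs inside the container.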
+	runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs")
+	out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
+	if err != nil && exitCode != 0 {
+		t.Fatal(out, stderr, err)
+	}
+	if !strings.Contains(out, filepath.Base(f.Name())) {
+		t.Fatal("Recursive bind mount test failed. Expected file not found")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - volumes are bind mounted recuursively")
+}
+
+func TestDnsDefaultOptions(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf")
+
+	actual, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, actual)
+	}
+
+	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		t.Fatalf("could not read /etc/resolv.conf: %v", err)
+	}
+
+	if actual != string(resolvConf) {
+		t.Fatalf("expected resolv.conf is not the same of actual")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - dns default options")
+}
+
+func TestDnsOptions(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
+	if actual != "nameserver 127.0.0.1 search mydomain" {
+		t.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: '%s'", actual)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf")
+
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1)
+	if actual != "nameserver 127.0.0.1" {
+		t.Fatalf("expected 'nameserver 127.0.0.1', but says: '%s'", actual)
+	}
+
+	logDone("run - dns options")
+}
+
+func TestDnsOptionsBasedOnHostResolvConf(t *testing.T) {
+	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		t.Fatalf("could not read /etc/resolv.conf: %v", err)
+	}
+
+	hostNameservers := resolvconf.GetNameservers(resolvConf)
+	hostSearch := resolvconf.GetSearchDomains(resolvConf)
+
+	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	if actualNameservers := resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "127.0.0.1" {
+		t.Fatalf("expected '127.0.0.1', but says: '%s'", string(actualNameservers[0]))
+	}
+
+	actualSearch := resolvconf.GetSearchDomains([]byte(out))
+	if len(actualSearch) != len(hostSearch) {
+		t.Fatalf("expected '%s' search domain(s), but it has: '%s'", len(hostSearch), len(actualSearch))
+	}
+	for i := range actualSearch {
+		if actualSearch[i] != hostSearch[i] {
+			t.Fatalf("expected '%s' domain, but says: '%s'", actualSearch[i], hostSearch[i])
+		}
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
+
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+
+	actualNameservers := resolvconf.GetNameservers([]byte(out))
+	if len(actualNameservers) != len(hostNameservers) {
+		t.Fatalf("expected %d nameserver(s), but got %d", len(hostNameservers), len(actualNameservers))
+	}
+	for i := range actualNameservers {
+		if actualNameservers[i] != hostNameservers[i] {
+			t.Fatalf("expected '%s' nameserver, but got '%s'", hostNameservers[i], actualNameservers[i])
+		}
+	}
+
+	if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" {
+		t.Fatalf("expected 'mydomain', but says: '%s'", string(actualSearch[0]))
+	}
+
+	deleteAllContainers()
+
+	logDone("run - dns options based on host resolv.conf")
+}
+
+// Regression test for #6983
+func TestAttachStdErrOnlyTTYMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stderr", "busybox", "true")
+
+	exitCode, err := runCommand(cmd)
+	if err != nil {
+		t.Fatal(err)
+	} else if exitCode != 0 {
+		t.Fatalf("Container should have exited with error code 0")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - Attach stderr only with -t")
+}
+
+// Regression test for #6983
+func TestAttachStdOutOnlyTTYMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "busybox", "true")
+
+	exitCode, err := runCommand(cmd)
+	if err != nil {
+		t.Fatal(err)
+	} else if exitCode != 0 {
+		t.Fatalf("Container should have exited with error code 0")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - Attach stdout only with -t")
+}
+
+// Regression test for #6983
+func TestAttachStdOutAndErrTTYMode(t *testing.T) {
+	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
+
+	exitCode, err := runCommand(cmd)
+	if err != nil {
+		t.Fatal(err)
+	} else if exitCode != 0 {
+		t.Fatalf("Container should have exited with error code 0")
+	}
+
+	deleteAllContainers()
+
+	logDone("run - Attach stderr and stdout with -t")
+}
+
+func TestState(t *testing.T) {
+	defer deleteAllContainers()
+	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	id := strings.TrimSpace(out)
+	state, err := inspectField(id, "State.Running")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if state != "true" {
+		t.Fatal("Container state is 'not running'")
+	}
+	pid1, err := inspectField(id, "State.Pid")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pid1 == "0" {
+		t.Fatal("Container state Pid 0")
+	}
+
+	cmd = exec.Command(dockerBinary, "stop", id)
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	state, err = inspectField(id, "State.Running")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if state != "false" {
+		t.Fatal("Container state is 'running'")
+	}
+	pid2, err := inspectField(id, "State.Pid")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pid2 == pid1 {
+		t.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
+	}
+
+	cmd = exec.Command(dockerBinary, "start", id)
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	state, err = inspectField(id, "State.Running")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if state != "true" {
+		t.Fatal("Container state is 'not running'")
+	}
+	pid3, err := inspectField(id, "State.Pid")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pid3 == pid1 {
+		t.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
+	}
+	logDone("run - test container state.")
+}
+
+// Test for #1737
+func TestCopyVolumeUidGid(t *testing.T) {
+	name := "testrunvolumesuidgid"
+	defer deleteImages(name)
+	defer deleteAllContainers()
+	_, err := buildImage(name,
+		`FROM busybox
+		RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+		RUN echo 'dockerio:x:1001:' >> /etc/group
+		RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test that the uid and gid is copied from the image to the volume
+	cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	out = strings.TrimSpace(out)
+	if out != "dockerio:dockerio" {
+		t.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out)
+	}
+
+	logDone("run - copy uid/gid for volume")
+}
+
+// Test for #1582
+func TestCopyVolumeContent(t *testing.T) {
+	name := "testruncopyvolumecontent"
+	defer deleteImages(name)
+	defer deleteAllContainers()
+	_, err := buildImage(name,
+		`FROM busybox
+		RUN mkdir -p /hello/local && echo hello > /hello/local/world`,
+		true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test that the content is copied from the image to the volume
+	cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "find", "/hello")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) {
+		t.Fatal("Container failed to transfer content to volume")
+	}
+	logDone("run - copy volume content")
+}
+
+func TestRunCleanupCmdOnEntrypoint(t *testing.T) {
+	name := "testrunmdcleanuponentrypoint"
+	defer deleteImages(name)
+	defer deleteAllContainers()
+	if _, err := buildImage(name,
+		`FROM busybox
+		ENTRYPOINT ["echo"]
+		CMD ["testingpoint"]`,
+		true); err != nil {
+		t.Fatal(err)
+	}
+	runCmd := exec.Command(dockerBinary, "run", "--entrypoint", "whoami", name)
+	out, exit, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		t.Fatalf("Error: %v, out: %q", err, out)
+	}
+	if exit != 0 {
+		t.Fatalf("expected exit code 0 received %d, out: %q", exit, out)
+	}
+	out = strings.TrimSpace(out)
+	if out != "root" {
+		t.Fatalf("Expected output root, got %q", out)
+	}
+	logDone("run - cleanup cmd on --entrypoint")
+}
+
+// TestRunWorkdirExistsAndIsFile checks that 'docker run -w' fails when the working directory path exists but is a file
+func TestRunWorkdirExistsAndIsFile(t *testing.T) {
+	defer deleteAllContainers()
+	runCmd := exec.Command(dockerBinary, "run", "-w", "/bin/cat", "busybox")
+	out, exit, err := runCommandWithOutput(runCmd)
+	if !(err != nil && exit == 1 && strings.Contains(out, "Cannot mkdir: /bin/cat is not a directory")) {
+		t.Fatalf("Docker must complains about making dir, but we got out: %s, exit: %d, err: %s", out, exit, err)
+	}
+	logDone("run - error on existing file for workdir")
+}
+
+func TestRunExitOnStdinClose(t *testing.T) {
+	name := "testrunexitonstdinclose"
+	defer deleteAllContainers()
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", "/bin/cat")
+
+	stdin, err := runCmd.StdinPipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+	stdout, err := runCmd.StdoutPipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := runCmd.Start(); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := stdin.Write([]byte("hello\n")); err != nil {
+		t.Fatal(err)
+	}
+
+	r := bufio.NewReader(stdout)
+	line, err := r.ReadString('\n')
+	if err != nil {
+		t.Fatal(err)
+	}
+	line = strings.TrimSpace(line)
+	if line != "hello" {
+		t.Fatalf("Output should be 'hello', got '%q'", line)
+	}
+	if err := stdin.Close(); err != nil {
+		t.Fatal(err)
+	}
+	finish := make(chan struct{})
+	go func() {
+		// t.Fatal must not be called from a goroutine; report via t.Error and
+		// let the select below handle the timeout instead.
+		if err := runCmd.Wait(); err != nil {
+			t.Error(err)
+		}
+		close(finish)
+	}()
+	select {
+	case <-finish:
+	case <-time.After(1 * time.Second):
+		t.Fatal("docker run failed to exit on stdin close")
+	}
+	state, err := inspectField(name, "State.Running")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if state != "false" {
+		t.Fatal("Container must be stopped after stdin closing")
+	}
+	logDone("run - exit on stdin closing")
+}
+
+// Test for #2267
+func TestWriteHostsFileAndNotCommit(t *testing.T) {
+	name := "writehosts"
+	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "test2267") {
+		t.Fatal("/etc/hosts should contain 'test2267'")
+	}
+
+	cmd = exec.Command(dockerBinary, "diff", name)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if len(strings.Trim(out, "\r\n")) != 0 {
+		t.Fatal("diff should be empty")
+	}
+
+	logDone("run - write to /etc/hosts and not commited")
+}
+
+// Test for #2267
+func TestWriteHostnameFileAndNotCommit(t *testing.T) {
+	name := "writehostname"
+	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "test2267") {
+		t.Fatal("/etc/hostname should contain 'test2267'")
+	}
+
+	cmd = exec.Command(dockerBinary, "diff", name)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if len(strings.Trim(out, "\r\n")) != 0 {
+		t.Fatal("diff should be empty")
+	}
+
+	logDone("run - write to /etc/hostname and not commited")
+}
+
+// Test for #2267
+func TestWriteResolvFileAndNotCommit(t *testing.T) {
+	name := "writeresolv"
+	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if !strings.Contains(out, "test2267") {
+		t.Fatal("/etc/resolv.conf should contain 'test2267'")
+	}
+
+	cmd = exec.Command(dockerBinary, "diff", name)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		t.Fatal(err, out)
+	}
+	if len(strings.Trim(out, "\r\n")) != 0 {
+		t.Fatal("diff should be empty")
+	}
+
+	logDone("run - write to /etc/resolv.conf and not commited")
+}
diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go
index fb94cad..c70769e 100644
--- a/integration-cli/docker_cli_save_load_test.go
+++ b/integration-cli/docker_cli_save_load_test.go
@@ -59,6 +59,63 @@
 	logDone("load - load a repo using stdout")
 }
 
+func TestSaveSingleTag(t *testing.T) {
+	repoName := "foobar-save-single-tag-test"
+
+	tagCmdFinal := fmt.Sprintf("%v tag busybox:latest %v:latest", dockerBinary, repoName)
+	tagCmd := exec.Command("bash", "-c", tagCmdFinal)
+	out, _, err := runCommandWithOutput(tagCmd)
+	errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err))
+
+	idCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName)
+	idCmd := exec.Command("bash", "-c", idCmdFinal)
+	out, _, err = runCommandWithOutput(idCmd)
+	errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err))
+
+	cleanedImageID := stripTrailingCharacters(out)
+
+	saveCmdFinal := fmt.Sprintf("%v save %v:latest | tar t | grep -E '(^repositories$|%v)'", dockerBinary, repoName, cleanedImageID)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	errorOut(err, t, fmt.Sprintf("failed to save repo with image ID and 'repositories' file: %v %v", out, err))
+
+	deleteImages(repoName)
+
+	logDone("save - save a specific image:tag")
+}
+
+func TestSaveImageId(t *testing.T) {
+	repoName := "foobar-save-image-id-test"
+
+	tagCmdFinal := fmt.Sprintf("%v tag scratch:latest %v:latest", dockerBinary, repoName)
+	tagCmd := exec.Command("bash", "-c", tagCmdFinal)
+	out, _, err := runCommandWithOutput(tagCmd)
+	errorOut(err, t, fmt.Sprintf("failed to tag repo: %v %v", out, err))
+
+	idLongCmdFinal := fmt.Sprintf("%v images -q --no-trunc %v", dockerBinary, repoName)
+	idLongCmd := exec.Command("bash", "-c", idLongCmdFinal)
+	out, _, err = runCommandWithOutput(idLongCmd)
+	errorOut(err, t, fmt.Sprintf("failed to get repo ID: %v %v", out, err))
+
+	cleanedLongImageID := stripTrailingCharacters(out)
+
+	idShortCmdFinal := fmt.Sprintf("%v images -q %v", dockerBinary, repoName)
+	idShortCmd := exec.Command("bash", "-c", idShortCmdFinal)
+	out, _, err = runCommandWithOutput(idShortCmd)
+	errorOut(err, t, fmt.Sprintf("failed to get repo short ID: %v %v", out, err))
+
+	cleanedShortImageID := stripTrailingCharacters(out)
+
+	saveCmdFinal := fmt.Sprintf("%v save %v | tar t | grep %v", dockerBinary, cleanedShortImageID, cleanedLongImageID)
+	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+	out, _, err = runCommandWithOutput(saveCmd)
+	errorOut(err, t, fmt.Sprintf("failed to save repo with image ID: %v %v", out, err))
+
+	deleteImages(repoName)
+
+	logDone("save - save a image by ID")
+}
+
 // save a repo and try to load it using flags
 func TestSaveAndLoadRepoFlags(t *testing.T) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go
index 16523ff..e8b9efd 100644
--- a/integration-cli/docker_cli_search_test.go
+++ b/integration-cli/docker_cli_search_test.go
@@ -9,7 +9,7 @@
 
-// search for repos named  "registry" on the central registry
+// search for repos named "busybox" on the central registry
 func TestSearchOnCentralRegistry(t *testing.T) {
-	searchCmd := exec.Command(dockerBinary, "search", "stackbrew/busybox")
+	searchCmd := exec.Command(dockerBinary, "search", "busybox")
 	out, exitCode, err := runCommandWithOutput(searchCmd)
 	errorOut(err, t, fmt.Sprintf("encountered error while searching: %v", err))
 
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
index 74576ba..5561fc2 100644
--- a/integration-cli/docker_utils.go
+++ b/integration-cli/docker_utils.go
@@ -8,6 +8,7 @@
 	"os"
 	"os/exec"
 	"path"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"testing"
@@ -87,12 +88,26 @@
 	return
 }
 
+// Deprecated: use dockerCmd instead.
 func cmd(t *testing.T, args ...string) (string, int, error) {
+	return dockerCmd(t, args...)
+}
+
+func dockerCmd(t *testing.T, args ...string) (string, int, error) {
 	out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
 	errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out))
 	return out, status, err
 }
 
+// dockerCmdInDir executes a docker command in the given working directory, failing the test on error
+func dockerCmdInDir(t *testing.T, path string, args ...string) (string, int, error) {
+	dockerCommand := exec.Command(dockerBinary, args...)
+	dockerCommand.Dir = path
+	out, status, err := runCommandWithOutput(dockerCommand)
+	errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out))
+	return out, status, err
+}
+
 func findContainerIp(t *testing.T, id string) string {
 	cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id)
 	out, _, err := runCommandWithOutput(cmd)
@@ -211,11 +226,21 @@
 	return strings.TrimSpace(out), nil
 }
 
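+// inspectFieldJSON is like inspectField but renders the field through the
+// {{json ...}} template function, preserving structured values as JSON.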
+func inspectFieldJSON(name, field string) (string, error) {
+	format := fmt.Sprintf("{{json .%s}}", field)
+	inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name)
+	out, exitCode, err := runCommandWithOutput(inspectCmd)
+	if err != nil || exitCode != 0 {
+		return "", fmt.Errorf("failed to inspect %s: %s", name, out)
+	}
+	return strings.TrimSpace(out), nil
+}
+
 func getIDByName(name string) (string, error) {
 	return inspectField(name, "Id")
 }
 
-func buildImage(name, dockerfile string, useCache bool) (string, error) {
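+// buildImageWithOut builds an image from a Dockerfile passed on stdin and
+// returns both the resulting image ID and the raw build output.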
+func buildImageWithOut(name, dockerfile string, useCache bool) (string, string, error) {
 	args := []string{"build", "-t", name}
 	if !useCache {
 		args = append(args, "--no-cache")
@@ -225,9 +250,18 @@
 	buildCmd.Stdin = strings.NewReader(dockerfile)
 	out, exitCode, err := runCommandWithOutput(buildCmd)
 	if err != nil || exitCode != 0 {
-		return "", fmt.Errorf("failed to build the image: %s", out)
+		return "", out, fmt.Errorf("failed to build the image: %s", out)
 	}
-	return getIDByName(name)
+	id, err := getIDByName(name)
+	if err != nil {
+		return "", out, err
+	}
+	return id, out, nil
+}
+
+func buildImage(name, dockerfile string, useCache bool) (string, error) {
+	id, _, err := buildImageWithOut(name, dockerfile, useCache)
+	return id, err
 }
 
 func buildImageFromContext(name string, ctx *FakeContext, useCache bool) (string, error) {
@@ -244,3 +278,93 @@
 	}
 	return getIDByName(name)
 }
+
+func buildImageFromPath(name, path string, useCache bool) (string, error) {
+	args := []string{"build", "-t", name}
+	if !useCache {
+		args = append(args, "--no-cache")
+	}
+	args = append(args, path)
+	buildCmd := exec.Command(dockerBinary, args...)
+	out, exitCode, err := runCommandWithOutput(buildCmd)
+	if err != nil || exitCode != 0 {
+		return "", fmt.Errorf("failed to build the image: %s", out)
+	}
+	return getIDByName(name)
+}
+
+type FakeGIT struct {
+	*httptest.Server
+	Root    string
+	RepoURL string
+}
+
+func (g *FakeGIT) Close() {
+	g.Server.Close()
+	os.RemoveAll(g.Root)
+}
+
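+// fakeGIT commits the given files to a fresh repository, clones it bare and
+// serves it over a local HTTP test server using the dumb git protocol
+// (which is why update-server-info is run below).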
+func fakeGIT(name string, files map[string]string) (*FakeGIT, error) {
+	tmp, err := ioutil.TempDir("", "fake-git-repo")
+	if err != nil {
+		return nil, err
+	}
+	ctx := &FakeContext{tmp}
+	for file, content := range files {
+		if err := ctx.Add(file, content); err != nil {
+			ctx.Close()
+			return nil, err
+		}
+	}
+	defer ctx.Close()
+	curdir, err := os.Getwd()
+	if err != nil {
+		return nil, err
+	}
+	defer os.Chdir(curdir)
+
+	if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("Error trying to init repo: %s (%s)", err, output)
+	}
+	err = os.Chdir(ctx.Dir)
+	if err != nil {
+		return nil, err
+	}
+	if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("Error trying to add files to repo: %s (%s)", err, output)
+	}
+	if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("Error trying to commit to repo: %s (%s)", err, output)
+	}
+
+	root, err := ioutil.TempDir("", "docker-test-git-repo")
+	if err != nil {
+		return nil, err
+	}
+	repoPath := filepath.Join(root, name+".git")
+	if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil {
+		os.RemoveAll(root)
+		return nil, fmt.Errorf("Error trying to clone --bare: %s (%s)", err, output)
+	}
+	err = os.Chdir(repoPath)
+	if err != nil {
+		os.RemoveAll(root)
+		return nil, err
+	}
+	if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil {
+		os.RemoveAll(root)
+		return nil, fmt.Errorf("Error trying to git update-server-info: %s (%s)", err, output)
+	}
+	err = os.Chdir(curdir)
+	if err != nil {
+		os.RemoveAll(root)
+		return nil, err
+	}
+	handler := http.FileServer(http.Dir(root))
+	server := httptest.NewServer(handler)
+	return &FakeGIT{
+		Server:  server,
+		Root:    root,
+		RepoURL: fmt.Sprintf("%s/%s.git", server.URL, name),
+	}, nil
+}
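
A rough sketch of how the new helpers compose (the test name, image, and
Dockerfile below are hypothetical, not part of this change):

    func TestBuildFromFakeGitRepo(t *testing.T) { // hypothetical example
        git, err := fakeGIT("repo", map[string]string{
            "Dockerfile": "FROM busybox\nRUN echo hi > /hi",
        })
        if err != nil {
            t.Fatal(err)
        }
        defer git.Close()

        // git.RepoURL points at the bare repo served over HTTP by httptest;
        // building from it exercises the remote-context path end to end.
        if _, err := buildImageFromPath("fromgit", git.RepoURL, true); err != nil {
            t.Fatal(err)
        }
    }
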
diff --git a/integration-cli/utils.go b/integration-cli/utils.go
index ae7af52..c414c9a 100644
--- a/integration-cli/utils.go
+++ b/integration-cli/utils.go
@@ -2,9 +2,11 @@
 
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"io"
 	"os/exec"
+	"reflect"
 	"strings"
 	"syscall"
 	"testing"
@@ -111,3 +113,24 @@
 func nLines(s string) int {
 	return strings.Count(s, "\n")
 }
+
+func unmarshalJSON(data []byte, result interface{}) error {
+	err := json.Unmarshal(data, result)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func deepEqual(expected interface{}, result interface{}) bool {
+	return reflect.DeepEqual(result, expected)
+}
+
+func convertSliceOfStringsToMap(input []string) map[string]struct{} {
+	output := make(map[string]struct{})
+	for _, v := range input {
+		output[v] = struct{}{}
+	}
+	return output
+}
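
Taken together with inspectFieldJSON from docker_utils.go, these helpers
support structural assertions on inspect output; a minimal sketch (the
container name and field are made up, and a *testing.T is assumed in scope):

    out, err := inspectFieldJSON("testcontainer", "Volumes") // hypothetical container
    if err != nil {
        t.Fatal(err)
    }
    var volumes map[string]struct{}
    if err := unmarshalJSON([]byte(out), &volumes); err != nil {
        t.Fatal(err)
    }
    // Compare against an expected set, ignoring original slice order.
    expected := convertSliceOfStringsToMap([]string{"/test"})
    if !deepEqual(expected, volumes) {
        t.Fatalf("expected volumes %v, got %v", expected, volumes)
    }
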
diff --git a/integration/MAINTAINERS b/integration/MAINTAINERS
index d7bef62..ad2d2d2 100644
--- a/integration/MAINTAINERS
+++ b/integration/MAINTAINERS
@@ -1,4 +1,2 @@
-Solomon Hykes <s@docker.com> (@shykes)
-# WE ARE LOOKING FOR VOLUNTEERS TO HELP CLEAN THIS UP.
-# TO VOLUNTEER PLEASE OPEN A PULL REQUEST ADDING YOURSELF TO THIS FILE.
-# WE WILL HELP YOU GET STARTED. THANKS!
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
diff --git a/integration/api_test.go b/integration/api_test.go
index b2e4471..e5f93fb 100644
--- a/integration/api_test.go
+++ b/integration/api_test.go
@@ -9,145 +9,16 @@
 	"net"
 	"net/http"
 	"net/http/httptest"
-	"strings"
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/api/server"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/server"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
-func TestGetImagesJSON(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	job := eng.Job("images")
-	initialImages, err := job.Stdout.AddListTable()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("GET", "/images/json?all=0", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r := httptest.NewRecorder()
-
-	if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
-		t.Fatal(err)
-	}
-	assertHttpNotError(r, t)
-
-	images := engine.NewTable("Created", 0)
-	if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil {
-		t.Fatal(err)
-	}
-
-	if images.Len() != initialImages.Len() {
-		t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len())
-	}
-
-	found := false
-	for _, img := range images.Data {
-		if strings.Contains(img.GetList("RepoTags")[0], unitTestImageName) {
-			found = true
-			break
-		}
-	}
-	if !found {
-		t.Errorf("Expected image %s, %+v found", unitTestImageName, images)
-	}
-
-	r2 := httptest.NewRecorder()
-
-	// all=1
-
-	initialImages = getAllImages(eng, t)
-
-	req2, err := http.NewRequest("GET", "/images/json?all=true", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := server.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil {
-		t.Fatal(err)
-	}
-	assertHttpNotError(r2, t)
-
-	images2 := engine.NewTable("Id", 0)
-	if _, err := images2.ReadListFrom(r2.Body.Bytes()); err != nil {
-		t.Fatal(err)
-	}
-
-	if images2.Len() != initialImages.Len() {
-		t.Errorf("Expected %d image, %d found", initialImages.Len(), images2.Len())
-	}
-
-	found = false
-	for _, img := range images2.Data {
-		if img.Get("Id") == unitTestImageID {
-			found = true
-			break
-		}
-	}
-	if !found {
-		t.Errorf("Retrieved image Id differs, expected %s, received %+v", unitTestImageID, images2)
-	}
-
-	r3 := httptest.NewRecorder()
-
-	// filter=a
-	req3, err := http.NewRequest("GET", "/images/json?filter=aaaaaaaaaa", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := server.ServeRequest(eng, api.APIVERSION, r3, req3); err != nil {
-		t.Fatal(err)
-	}
-	assertHttpNotError(r3, t)
-
-	images3 := engine.NewTable("Id", 0)
-	if _, err := images3.ReadListFrom(r3.Body.Bytes()); err != nil {
-		t.Fatal(err)
-	}
-
-	if images3.Len() != 0 {
-		t.Errorf("Expected 0 image, %d found", images3.Len())
-	}
-}
-
-func TestGetImagesByName(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r := httptest.NewRecorder()
-	if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
-		t.Fatal(err)
-	}
-	assertHttpNotError(r, t)
-
-	img := &image.Image{}
-	if err := json.Unmarshal(r.Body.Bytes(), img); err != nil {
-		t.Fatal(err)
-	}
-	if img.ID != unitTestImageID {
-		t.Errorf("Error inspecting image")
-	}
-}
-
 func TestGetContainersJSON(t *testing.T) {
 	eng := NewTestEngine(t)
 	defer mkDaemonFromEngine(eng, t).Nuke()
@@ -897,35 +768,6 @@
 	containerWait(eng, containerID, t)
 }
 
-// FIXME: Test deleting running container
-// FIXME: Test deleting container with volume
-// FIXME: Test deleting volume in use by other container
-func TestDeleteContainers(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image: unitTestImageID,
-			Cmd:   []string{"touch", "/test"},
-		},
-		t,
-	)
-	req, err := http.NewRequest("DELETE", "/containers/"+containerID, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	r := httptest.NewRecorder()
-	if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
-		t.Fatal(err)
-	}
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusNoContent {
-		t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
-	}
-	containerAssertNotExists(eng, containerID, t)
-}
-
 func TestOptionsRoute(t *testing.T) {
 	eng := NewTestEngine(t)
 	defer mkDaemonFromEngine(eng, t).Nuke()
diff --git a/integration/auth_test.go b/integration/auth_test.go
index 8109bbb..42cd1ac 100644
--- a/integration/auth_test.go
+++ b/integration/auth_test.go
@@ -4,10 +4,11 @@
 	"crypto/rand"
 	"encoding/hex"
 	"fmt"
-	"github.com/dotcloud/docker/registry"
 	"os"
 	"strings"
 	"testing"
+
+	"github.com/docker/docker/registry"
 )
 
 // FIXME: these tests have an external dependency on a staging index hosted
@@ -17,13 +18,13 @@
 
 func TestLogin(t *testing.T) {
 	t.Skip("FIXME: please remove dependency on external services")
-	os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
+	os.Setenv("DOCKER_INDEX_URL", "https://registry-stage.hub.docker.com/v1/")
 	defer os.Setenv("DOCKER_INDEX_URL", "")
 	authConfig := &registry.AuthConfig{
 		Username:      "unittester",
 		Password:      "surlautrerivejetattendrai",
 		Email:         "noise+unittester@docker.com",
-		ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/",
+		ServerAddress: "https://registry-stage.hub.docker.com/v1/",
 	}
 	status, err := registry.Login(authConfig, nil)
 	if err != nil {
@@ -47,7 +48,7 @@
 		Username:      username,
 		Password:      "test42",
 		Email:         fmt.Sprintf("docker-ut+%s@example.com", token),
-		ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/",
+		ServerAddress: "https://registry-stage.hub.docker.com/v1/",
 	}
 	status, err := registry.Login(authConfig, nil)
 	if err != nil {
diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go
deleted file mode 100644
index 147ae35..0000000
--- a/integration/buildfile_test.go
+++ /dev/null
@@ -1,414 +0,0 @@
-package docker
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/http/httptest"
-	"strings"
-	"testing"
-
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/server"
-	"github.com/dotcloud/docker/utils"
-)
-
-// A testContextTemplate describes a build context and how to test it
-type testContextTemplate struct {
-	// Contents of the Dockerfile
-	dockerfile string
-	// Additional files in the context, eg [][2]string{"./passwd", "gordon"}
-	files [][2]string
-	// Additional remote files to host on a local HTTP server.
-	remoteFiles [][2]string
-}
-
-func (context testContextTemplate) Archive(dockerfile string, t *testing.T) archive.Archive {
-	input := []string{"Dockerfile", dockerfile}
-	for _, pair := range context.files {
-		input = append(input, pair[0], pair[1])
-	}
-	a, err := archive.Generate(input...)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return a
-}
-
-// A table of all the contexts to build and test.
-// A new docker runtime will be created and torn down for each context.
-var testContexts = []testContextTemplate{
-	{
-		`
-from   {IMAGE}
-run    sh -c 'echo root:testpass > /tmp/passwd'
-run    mkdir -p /var/run/sshd
-run    [ "$(cat /tmp/passwd)" = "root:testpass" ]
-run    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
-`,
-		nil,
-		nil,
-	},
-
-	// Exactly the same as above, except uses a line split with a \ to test
-	// multiline support.
-	{
-		`
-from   {IMAGE}
-run    sh -c 'echo root:testpass \
-	> /tmp/passwd'
-run    mkdir -p /var/run/sshd
-run    [ "$(cat /tmp/passwd)" = "root:testpass" ]
-run    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
-`,
-		nil,
-		nil,
-	},
-
-	// Line containing literal "\n"
-	{
-		`
-from   {IMAGE}
-run    sh -c 'echo root:testpass > /tmp/passwd'
-run    echo "foo \n bar"; echo "baz"
-run    mkdir -p /var/run/sshd
-run    [ "$(cat /tmp/passwd)" = "root:testpass" ]
-run    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
-`,
-		nil,
-		nil,
-	},
-	{
-		`
-from {IMAGE}
-add foo /usr/lib/bla/bar
-run [ "$(cat /usr/lib/bla/bar)" = 'hello' ]
-add http://{SERVERADDR}/baz /usr/lib/baz/quux
-run [ "$(cat /usr/lib/baz/quux)" = 'world!' ]
-`,
-		[][2]string{{"foo", "hello"}},
-		[][2]string{{"/baz", "world!"}},
-	},
-
-	{
-		`
-from {IMAGE}
-add f /
-run [ "$(cat /f)" = "hello" ]
-add f /abc
-run [ "$(cat /abc)" = "hello" ]
-add f /x/y/z
-run [ "$(cat /x/y/z)" = "hello" ]
-add f /x/y/d/
-run [ "$(cat /x/y/d/f)" = "hello" ]
-add d /
-run [ "$(cat /ga)" = "bu" ]
-add d /somewhere
-run [ "$(cat /somewhere/ga)" = "bu" ]
-add d /anotherplace/
-run [ "$(cat /anotherplace/ga)" = "bu" ]
-add d /somewheeeere/over/the/rainbooow
-run [ "$(cat /somewheeeere/over/the/rainbooow/ga)" = "bu" ]
-`,
-		[][2]string{
-			{"f", "hello"},
-			{"d/ga", "bu"},
-		},
-		nil,
-	},
-
-	{
-		`
-from {IMAGE}
-add http://{SERVERADDR}/x /a/b/c
-run [ "$(cat /a/b/c)" = "hello" ]
-add http://{SERVERADDR}/x?foo=bar /
-run [ "$(cat /x)" = "hello" ]
-add http://{SERVERADDR}/x /d/
-run [ "$(cat /d/x)" = "hello" ]
-add http://{SERVERADDR} /e
-run [ "$(cat /e)" = "blah" ]
-`,
-		nil,
-		[][2]string{{"/x", "hello"}, {"/", "blah"}},
-	},
-
-	// Comments, shebangs, and executability, oh my!
-	{
-		`
-FROM {IMAGE}
-# This is an ordinary comment.
-RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
-RUN [ ! -x /hello.sh ]
-RUN chmod +x /hello.sh
-RUN [ -x /hello.sh ]
-RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
-RUN [ "$(/hello.sh)" = "hello world" ]
-`,
-		nil,
-		nil,
-	},
-
-	// Users and groups
-	{
-		`
-FROM {IMAGE}
-
-# Make sure our defaults work
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ]
-
-# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0)
-USER root
-RUN [ "$(id -G):$(id -Gn)" = '0:root' ]
-
-# Setup dockerio user and group
-RUN echo 'dockerio:x:1000:1000::/bin:/bin/false' >> /etc/passwd
-RUN echo 'dockerio:x:1000:' >> /etc/group
-
-# Make sure we can switch to our user and all the information is exactly as we expect it to be
-USER dockerio
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ]
-
-# Switch back to root and double check that worked exactly as we might expect it to
-USER root
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0:root' ]
-
-# Add a "supplementary" group for our dockerio user
-RUN echo 'supplementary:x:1001:dockerio' >> /etc/group
-
-# ... and then go verify that we get it like we expect
-USER dockerio
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ]
-USER 1000
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ]
-
-# super test the new "user:group" syntax
-USER dockerio:dockerio
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ]
-USER 1000:dockerio
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ]
-USER dockerio:1000
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ]
-USER 1000:1000
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ]
-USER dockerio:supplementary
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ]
-USER dockerio:1001
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ]
-USER 1000:supplementary
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ]
-USER 1000:1001
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ]
-
-# make sure unknown uid/gid still works properly
-USER 1042:1043
-RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]
-`,
-		nil,
-		nil,
-	},
-
-	// Environment variable
-	{
-		`
-from   {IMAGE}
-env    FOO BAR
-run    [ "$FOO" = "BAR" ]
-`,
-		nil,
-		nil,
-	},
-
-	// Environment overwriting
-	{
-		`
-from   {IMAGE}
-env    FOO BAR
-run    [ "$FOO" = "BAR" ]
-env    FOO BAZ
-run    [ "$FOO" = "BAZ" ]
-`,
-		nil,
-		nil,
-	},
-
-	{
-		`
-from {IMAGE}
-ENTRYPOINT /bin/echo
-CMD Hello world
-`,
-		nil,
-		nil,
-	},
-
-	{
-		`
-from {IMAGE}
-VOLUME /test
-CMD Hello world
-`,
-		nil,
-		nil,
-	},
-
-	{
-		`
-from {IMAGE}
-env    FOO /foo/baz
-env    BAR /bar
-env    BAZ $BAR
-env    FOOPATH $PATH:$FOO
-run    [ "$BAR" = "$BAZ" ]
-run    [ "$FOOPATH" = "$PATH:/foo/baz" ]
-`,
-		nil,
-		nil,
-	},
-
-	{
-		`
-from {IMAGE}
-env    FOO /bar
-env    TEST testdir
-env    BAZ /foobar
-add    testfile $BAZ/
-add    $TEST $FOO
-run    [ "$(cat /foobar/testfile)" = "test1" ]
-run    [ "$(cat /bar/withfile)" = "test2" ]
-`,
-		[][2]string{
-			{"testfile", "test1"},
-			{"testdir/withfile", "test2"},
-		},
-		nil,
-	},
-
-	// JSON!
-	{
-		`
-FROM {IMAGE}
-RUN ["/bin/echo","hello","world"]
-CMD ["/bin/true"]
-ENTRYPOINT ["/bin/echo","your command -->"]
-`,
-		nil,
-		nil,
-	},
-	{
-		`
-FROM {IMAGE}
-ADD test /test
-RUN ["chmod","+x","/test"]
-RUN ["/test"]
-RUN [ "$(cat /testfile)" = 'test!' ]
-`,
-		[][2]string{
-			{"test", "#!/bin/sh\necho 'test!' > /testfile"},
-		},
-		nil,
-	},
-	{
-		`
-FROM {IMAGE}
-# what \
-RUN mkdir /testing
-RUN touch /testing/other
-`,
-		nil,
-		nil,
-	},
-}
-
-// FIXME: test building with 2 successive overlapping ADD commands
-
-func constructDockerfile(template string, ip net.IP, port string) string {
-	serverAddr := fmt.Sprintf("%s:%s", ip, port)
-	replacer := strings.NewReplacer("{IMAGE}", unitTestImageID, "{SERVERADDR}", serverAddr)
-	return replacer.Replace(template)
-}
-
-func mkTestingFileServer(files [][2]string) (*httptest.Server, error) {
-	mux := http.NewServeMux()
-	for _, file := range files {
-		name, contents := file[0], file[1]
-		mux.HandleFunc(name, func(w http.ResponseWriter, r *http.Request) {
-			w.Write([]byte(contents))
-		})
-	}
-
-	// This is how httptest.NewServer sets up a net.Listener, except that our listener must accept remote
-	// connections (from the container).
-	listener, err := net.Listen("tcp", ":0")
-	if err != nil {
-		return nil, err
-	}
-
-	s := httptest.NewUnstartedServer(mux)
-	s.Listener = listener
-	s.Start()
-	return s, nil
-}
-
-func TestBuild(t *testing.T) {
-	for _, ctx := range testContexts {
-		_, err := buildImage(ctx, t, nil, true)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
-func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*image.Image, error) {
-	if eng == nil {
-		eng = NewTestEngine(t)
-		runtime := mkDaemonFromEngine(eng, t)
-		// FIXME: we might not need runtime, why not simply nuke
-		// the engine?
-		defer nuke(runtime)
-	}
-	srv := mkServerFromEngine(eng, t)
-
-	httpServer, err := mkTestingFileServer(context.remoteFiles)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer httpServer.Close()
-
-	idx := strings.LastIndex(httpServer.URL, ":")
-	if idx < 0 {
-		t.Fatalf("could not get port from test http server address %s", httpServer.URL)
-	}
-	port := httpServer.URL[idx+1:]
-
-	iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP")
-	if iIP == nil {
-		t.Fatal("Legacy bridgeIP field not set in engine")
-	}
-	ip, ok := iIP.(net.IP)
-	if !ok {
-		panic("Legacy bridgeIP field in engine does not cast to net.IP")
-	}
-	dockerfile := constructDockerfile(context.dockerfile, ip, port)
-
-	buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil)
-	id, err := buildfile.Build(context.Archive(dockerfile, t))
-	if err != nil {
-		return nil, err
-	}
-
-	job := eng.Job("image_inspect", id)
-	buffer := bytes.NewBuffer(nil)
-	image := &image.Image{}
-	job.Stdout.Add(buffer)
-	if err := job.Run(); err != nil {
-		return nil, err
-	}
-	err = json.NewDecoder(buffer).Decode(image)
-	return image, err
-}
diff --git a/integration/commands_test.go b/integration/commands_test.go
index 47e9860..1073575 100644
--- a/integration/commands_test.go
+++ b/integration/commands_test.go
@@ -7,17 +7,15 @@
 	"io/ioutil"
 	"os"
 	"path"
-	"regexp"
 	"strings"
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/api/client"
-	"github.com/dotcloud/docker/daemon"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/api/client"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/utils"
 )
 
 func closeWrap(args ...io.Closer) error {
@@ -116,76 +114,6 @@
 	return nil
 }
 
-// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected
-func TestRunWorkdirExistsAndIsFile(t *testing.T) {
-
-	cli := client.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		if err := cli.CmdRun("-w", "/bin/cat", unitTestImageID, "pwd"); err == nil {
-			t.Fatal("should have failed to run when using /bin/cat as working dir.")
-		}
-	}()
-
-	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
-		<-c
-	})
-}
-
-func TestRunExit(t *testing.T) {
-	stdin, stdinPipe := io.Pipe()
-	stdout, stdoutPipe := io.Pipe()
-
-	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	c1 := make(chan struct{})
-	go func() {
-		cli.CmdRun("-i", unitTestImageID, "/bin/cat")
-		close(c1)
-	}()
-
-	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	container := globalDaemon.List()[0]
-
-	// Closing /bin/cat stdin, expect it to exit
-	if err := stdin.Close(); err != nil {
-		t.Fatal(err)
-	}
-
-	// as the process exited, CmdRun must finish and unblock. Wait for it
-	setTimeout(t, "Waiting for CmdRun timed out", 10*time.Second, func() {
-		<-c1
-
-		go func() {
-			cli.CmdWait(container.ID)
-		}()
-
-		if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	// Make sure that the client has been disconnected
-	setTimeout(t, "The client should have been disconnected once the remote process exited.", 2*time.Second, func() {
-		// Expecting pipe i/o error, just check that read does not block
-		stdin.Read([]byte{})
-	})
-
-	// Cleanup pipes
-	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
-		t.Fatal(err)
-	}
-}
-
 // Expected behaviour: the process dies when the client disconnects
 func TestRunDisconnect(t *testing.T) {
 
@@ -247,7 +175,7 @@
 		// We're simulating a disconnect so the return value doesn't matter. What matters is the
 		// fact that CmdRun returns.
 		if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
-			utils.Debugf("Error CmdRun: %s", err)
+			log.Debugf("Error CmdRun: %s", err)
 		}
 	}()
 
@@ -487,7 +415,7 @@
 	go func() {
 		// Start a process in daemon mode
 		if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil {
-			utils.Debugf("Error CmdRun: %s", err)
+			log.Debugf("Error CmdRun: %s", err)
 		}
 	}()
 
@@ -604,132 +532,6 @@
 	})
 }
 
-func TestImagesViz(t *testing.T) {
-	t.Skip("Image viz is deprecated")
-	stdout, stdoutPipe := io.Pipe()
-
-	cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	image := buildTestImages(t, globalEngine)
-
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		if err := cli.CmdImages("--viz"); err != nil {
-			t.Fatal(err)
-		}
-		stdoutPipe.Close()
-	}()
-
-	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
-		cmdOutputBytes, err := ioutil.ReadAll(bufio.NewReader(stdout))
-		if err != nil {
-			t.Fatal(err)
-		}
-		cmdOutput := string(cmdOutputBytes)
-
-		regexpStrings := []string{
-			"digraph docker {",
-			fmt.Sprintf("base -> \"%s\" \\[style=invis]", unitTestImageIDShort),
-			fmt.Sprintf("label=\"%s\\\\n%s:latest\"", unitTestImageIDShort, unitTestImageName),
-			fmt.Sprintf("label=\"%s\\\\n%s:%s\"", utils.TruncateID(image.ID), "test", "latest"),
-			"base \\[style=invisible]",
-		}
-
-		compiledRegexps := []*regexp.Regexp{}
-		for _, regexpString := range regexpStrings {
-			regexp, err := regexp.Compile(regexpString)
-			if err != nil {
-				fmt.Println("Error in regex string: ", err)
-				return
-			}
-			compiledRegexps = append(compiledRegexps, regexp)
-		}
-
-		for _, regexp := range compiledRegexps {
-			if !regexp.MatchString(cmdOutput) {
-				t.Fatalf("images --viz content '%s' did not match regexp '%s'", cmdOutput, regexp)
-			}
-		}
-	})
-}
-
-func TestImagesTree(t *testing.T) {
-	t.Skip("Image tree is deprecated")
-	stdout, stdoutPipe := io.Pipe()
-
-	cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	image := buildTestImages(t, globalEngine)
-
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		if err := cli.CmdImages("--tree"); err != nil {
-			t.Fatal(err)
-		}
-		stdoutPipe.Close()
-	}()
-
-	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
-		cmdOutputBytes, err := ioutil.ReadAll(bufio.NewReader(stdout))
-		if err != nil {
-			t.Fatal(err)
-		}
-		cmdOutput := string(cmdOutputBytes)
-		regexpStrings := []string{
-			fmt.Sprintf("└─%s Virtual Size: \\d+.\\d+ MB Tags: %s:latest", unitTestImageIDShort, unitTestImageName),
-			"(?m)   └─[0-9a-f]+.*",
-			"(?m)    └─[0-9a-f]+.*",
-			"(?m)      └─[0-9a-f]+.*",
-			fmt.Sprintf("(?m)^        └─%s Virtual Size: \\d+.\\d+ MB Tags: test:latest", utils.TruncateID(image.ID)),
-		}
-
-		compiledRegexps := []*regexp.Regexp{}
-		for _, regexpString := range regexpStrings {
-			regexp, err := regexp.Compile(regexpString)
-			if err != nil {
-				fmt.Println("Error in regex string: ", err)
-				return
-			}
-			compiledRegexps = append(compiledRegexps, regexp)
-		}
-
-		for _, regexp := range compiledRegexps {
-			if !regexp.MatchString(cmdOutput) {
-				t.Fatalf("images --tree content '%s' did not match regexp '%s'", cmdOutput, regexp)
-			}
-		}
-	})
-}
-
-func buildTestImages(t *testing.T, eng *engine.Engine) *image.Image {
-
-	var testBuilder = testContextTemplate{
-		`
-from   {IMAGE}
-run    sh -c 'echo root:testpass > /tmp/passwd'
-run    mkdir -p /var/run/sshd
-run    [ "$(cat /tmp/passwd)" = "root:testpass" ]
-run    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
-`,
-		nil,
-		nil,
-	}
-	image, err := buildImage(testBuilder, t, eng, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := eng.Job("tag", image.ID, "test").Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	return image
-}
-
 // #2098 - Docker cid files only contain the short version of the container ID
 // sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
 // TestRunCidFile tests that run --cidfile returns the long container ID
@@ -810,70 +612,3 @@
 		<-c
 	})
 }
-
-func TestContainerOrphaning(t *testing.T) {
-
-	// setup a temporary directory
-	tmpDir, err := ioutil.TempDir("", "project")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpDir)
-
-	// setup a CLI and server
-	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-	srv := mkServerFromEngine(globalEngine, t)
-
-	// closure to build something
-	buildSomething := func(template string, image string) string {
-		dockerfile := path.Join(tmpDir, "Dockerfile")
-		replacer := strings.NewReplacer("{IMAGE}", unitTestImageID)
-		contents := replacer.Replace(template)
-		ioutil.WriteFile(dockerfile, []byte(contents), 0x777)
-		if err := cli.CmdBuild("-t", image, tmpDir); err != nil {
-			t.Fatal(err)
-		}
-		job := globalEngine.Job("image_get", image)
-		info, _ := job.Stdout.AddEnv()
-		if err := job.Run(); err != nil {
-			t.Fatal(err)
-		}
-		return info.Get("Id")
-	}
-
-	// build an image
-	imageName := "orphan-test"
-	template1 := `
-	from {IMAGE}
-	cmd ["/bin/echo", "holla"]
-	`
-	img1 := buildSomething(template1, imageName)
-
-	// create a container using the fist image
-	if err := cli.CmdRun(imageName); err != nil {
-		t.Fatal(err)
-	}
-
-	// build a new image that splits lineage
-	template2 := `
-	from {IMAGE}
-	cmd ["/bin/echo", "holla"]
-	expose 22
-	`
-	buildSomething(template2, imageName)
-
-	// remove the second image by name
-	resp := engine.NewTable("", 0)
-	if err := srv.DeleteImage(imageName, resp, true, false, false); err == nil {
-		t.Fatal("Expected error, got none")
-	}
-
-	// see if we deleted the first image (and orphaned the container)
-	for _, i := range resp.Data {
-		if img1 == i.Get("Deleted") {
-			t.Fatal("Orphaned image with container")
-		}
-	}
-
-}
diff --git a/integration/container_test.go b/integration/container_test.go
index 48b3321..4462aba 100644
--- a/integration/container_test.go
+++ b/integration/container_test.go
@@ -10,7 +10,7 @@
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/runconfig"
+	"github.com/docker/docker/runconfig"
 )
 
 func TestKillDifferentUser(t *testing.T) {
@@ -71,37 +71,6 @@
 	}
 }
 
-func TestRestart(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-	container, _, err := daemon.Create(&runconfig.Config{
-		Image: GetTestImage(daemon).ID,
-		Cmd:   []string{"echo", "-n", "foobar"},
-	},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer daemon.Destroy(container)
-	output, err := container.Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "foobar" {
-		t.Error(string(output))
-	}
-
-	// Run the container again and check the output
-	output, err = container.Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "foobar" {
-		t.Error(string(output))
-	}
-}
-
 func TestRestartStdin(t *testing.T) {
 	daemon := mkDaemon(t)
 	defer nuke(daemon)
@@ -401,100 +370,6 @@
 	return tmpDir
 }
 
-// Test for #1737
-func TestCopyVolumeUidGid(t *testing.T) {
-	eng := NewTestEngine(t)
-	r := mkDaemonFromEngine(eng, t)
-	defer r.Nuke()
-
-	// Add directory not owned by root
-	container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test && chown daemon.daemon /hello"}, t)
-	defer r.Destroy(container1)
-
-	if container1.State.IsRunning() {
-		t.Errorf("Container shouldn't be running")
-	}
-	if err := container1.Run(); err != nil {
-		t.Fatal(err)
-	}
-	if container1.State.IsRunning() {
-		t.Errorf("Container shouldn't be running")
-	}
-
-	img, err := r.Commit(container1, "", "", "unit test commited image", "", true, nil)
-	if err != nil {
-		t.Error(err)
-	}
-
-	// Test that the uid and gid is copied from the image to the volume
-	tmpDir1 := tempDir(t)
-	defer os.RemoveAll(tmpDir1)
-	stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t)
-	if !strings.Contains(stdout1, "daemon daemon") {
-		t.Fatal("Container failed to transfer uid and gid to volume")
-	}
-
-	container2, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && chown daemon.daemon /hello"}, t)
-	defer r.Destroy(container1)
-
-	if container2.State.IsRunning() {
-		t.Errorf("Container shouldn't be running")
-	}
-	if err := container2.Run(); err != nil {
-		t.Fatal(err)
-	}
-	if container2.State.IsRunning() {
-		t.Errorf("Container shouldn't be running")
-	}
-
-	img2, err := r.Commit(container2, "", "", "unit test commited image", "", true, nil)
-	if err != nil {
-		t.Error(err)
-	}
-
-	// Test that the uid and gid is copied from the image to the volume
-	tmpDir2 := tempDir(t)
-	defer os.RemoveAll(tmpDir2)
-	stdout2, _ := runContainer(eng, r, []string{"-v", "/hello", img2.ID, "stat", "-c", "%U %G", "/hello"}, t)
-	if !strings.Contains(stdout2, "daemon daemon") {
-		t.Fatal("Container failed to transfer uid and gid to volume")
-	}
-}
-
-// Test for #1582
-func TestCopyVolumeContent(t *testing.T) {
-	eng := NewTestEngine(t)
-	r := mkDaemonFromEngine(eng, t)
-	defer r.Nuke()
-
-	// Put some content in a directory of a container and commit it
-	container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
-	defer r.Destroy(container1)
-
-	if container1.State.IsRunning() {
-		t.Errorf("Container shouldn't be running")
-	}
-	if err := container1.Run(); err != nil {
-		t.Fatal(err)
-	}
-	if container1.State.IsRunning() {
-		t.Errorf("Container shouldn't be running")
-	}
-
-	img, err := r.Commit(container1, "", "", "unit test commited image", "", true, nil)
-	if err != nil {
-		t.Error(err)
-	}
-
-	// Test that the content is copied from the image to the volume
-	tmpDir1 := tempDir(t)
-	defer os.RemoveAll(tmpDir1)
-	stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t)
-	if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) {
-		t.Fatal("Container failed to transfer content to volume")
-	}
-}
-
 func TestBindMounts(t *testing.T) {
 	eng := NewTestEngine(t)
 	r := mkDaemonFromEngine(eng, t)
@@ -526,47 +401,3 @@
 		t.Fatal("Container failed to write to bind mount file")
 	}
 }
-
-// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
-func TestRestartWithVolumes(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-
-	container, _, err := daemon.Create(&runconfig.Config{
-		Image:   GetTestImage(daemon).ID,
-		Cmd:     []string{"echo", "-n", "foobar"},
-		Volumes: map[string]struct{}{"/test": {}},
-	},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer daemon.Destroy(container)
-
-	for key := range container.Config.Volumes {
-		if key != "/test" {
-			t.Fail()
-		}
-	}
-
-	_, err = container.Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	expected := container.Volumes["/test"]
-	if expected == "" {
-		t.Fail()
-	}
-	// Run the container again to verify the volume path persists
-	_, err = container.Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	actual := container.Volumes["/test"]
-	if expected != actual {
-		t.Fatalf("Expected volume path: %s Actual path: %s", expected, actual)
-	}
-}
diff --git a/integration/graph_test.go b/integration/graph_test.go
index dc056f7..c6a0740 100644
--- a/integration/graph_test.go
+++ b/integration/graph_test.go
@@ -2,12 +2,12 @@
 
 import (
 	"errors"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon/graphdriver"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/graph"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/utils"
 	"io"
 	"io/ioutil"
 	"os"
diff --git a/integration/https_test.go b/integration/https_test.go
index 34c16cf..b15f4e5 100644
--- a/integration/https_test.go
+++ b/integration/https_test.go
@@ -8,7 +8,7 @@
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/api/client"
+	"github.com/docker/docker/api/client"
 )
 
 const (
diff --git a/integration/runtime_test.go b/integration/runtime_test.go
index 4c0f636..d81a13d 100644
--- a/integration/runtime_test.go
+++ b/integration/runtime_test.go
@@ -4,7 +4,7 @@
 	"bytes"
 	"fmt"
 	"io"
-	"log"
+	std_log "log"
 	"net"
 	"net/url"
 	"os"
@@ -16,13 +16,14 @@
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/daemon"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/sysinit"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/reexec"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 const (
@@ -31,6 +32,7 @@
 	unitTestImageIDShort     = "83599e29c455"
 	unitTestNetworkBridge    = "testdockbr0"
 	unitTestStoreBase        = "/var/lib/docker/unit-tests"
+	unitTestDockerTmpdir     = "/var/lib/docker/tmp"
 	testDaemonAddr           = "127.0.0.1:4270"
 	testDaemonProto          = "tcp"
 	testDaemonHttpsProto     = "tcp"
@@ -90,31 +92,31 @@
 	// To test other drivers, we need a dedicated driver validation suite.
 	os.Setenv("DOCKER_DRIVER", "vfs")
 	os.Setenv("TEST", "1")
+	os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir)
 
 	// Hack to run sys init during unit testing
-	if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") {
-		sysinit.SysInit()
+	if reexec.Init() {
 		return
 	}
 
 	if uid := syscall.Geteuid(); uid != 0 {
-		log.Fatal("docker tests need to be run as root")
+		log.Fatalf("docker tests need to be run as root")
 	}
 
 	// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
 	if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" {
 		src, err := os.Open(dockerinit)
 		if err != nil {
-			log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s\n", err)
+			log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err)
 		}
 		defer src.Close()
 		dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555)
 		if err != nil {
-			log.Fatalf("Unable to create dockerinit in test directory: %s\n", err)
+			log.Fatalf("Unable to create dockerinit in test directory: %s", err)
 		}
 		defer dst.Close()
 		if _, err := io.Copy(dst, src); err != nil {
-			log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s\n", err)
+			log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err)
 		}
 		dst.Close()
 		src.Close()
@@ -132,7 +134,7 @@
 }
 
 func setupBaseImage() {
-	eng := newTestEngine(log.New(os.Stderr, "", 0), false, unitTestStoreBase)
+	eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase)
 	job := eng.Job("image_inspect", unitTestImageName)
 	img, _ := job.Stdout.AddEnv()
 	// If the unit test is not found, try to download it.
@@ -148,17 +150,17 @@
 
 func spawnGlobalDaemon() {
 	if globalDaemon != nil {
-		utils.Debugf("Global daemon already exists. Skipping.")
+		log.Debugf("Global daemon already exists. Skipping.")
 		return
 	}
-	t := log.New(os.Stderr, "", 0)
+	t := std_log.New(os.Stderr, "", 0)
 	eng := NewTestEngine(t)
 	globalEngine = eng
 	globalDaemon = mkDaemonFromEngine(eng, t)
 
 	// Spawn a Daemon
 	go func() {
-		utils.Debugf("Spawning global daemon for integration tests")
+		log.Debugf("Spawning global daemon for integration tests")
 		listenURL := &url.URL{
 			Scheme: testDaemonProto,
 			Host:   testDaemonAddr,
@@ -196,19 +198,19 @@
 }
 
 func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
-	t := log.New(os.Stderr, "", 0)
+	t := std_log.New(os.Stderr, "", 0)
 	root, err := newTestDirectory(unitTestStoreBase)
 	if err != nil {
 		t.Fatal(err)
 	}
-	// FIXME: here we don't use NewTestEngine because it calls initserver with Autorestart=false,
+	// FIXME: here we don't use NewTestEngine because it configures the daemon with AutoRestart=false,
 	// and we want to set it to true.
 
 	eng := newTestEngine(t, true, root)
 
 	// Spawn a Daemon
 	go func() {
-		utils.Debugf("Spawning https daemon for integration tests")
+		log.Debugf("Spawning https daemon for integration tests")
 		listenURL := &url.URL{
 			Scheme: testDaemonHttpsProto,
 			Host:   addr,
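
The reexec.Init() call above replaces the old SelfPath()/".dockerinit"
string sniffing; in outline, the pattern is (a sketch, assuming the reexec
package's Register/Init API as used in this patch, with a hypothetical
initializer name):

    // In an init(), an initializer is registered under a name that
    // matches the re-exec'd process's argv[0]:
    reexec.Register("dockerinit", func() {
        // runs inside the re-exec'd child instead of the test binary
    })

    // At process start, before any test or daemon work:
    if reexec.Init() {
        // argv[0] matched a registered initializer and it already ran,
        // so the process must stop here rather than continue as a test.
        return
    }
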
diff --git a/integration/server_test.go b/integration/server_test.go
index 151490c..363236f 100644
--- a/integration/server_test.go
+++ b/integration/server_test.go
@@ -2,13 +2,11 @@
 
 import (
 	"bytes"
-	"strings"
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/server"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/runconfig"
 )
 
 func TestCreateNumberHostname(t *testing.T) {
@@ -23,18 +21,6 @@
 	createTestContainer(eng, config, t)
 }
 
-func TestCreateNumberUsername(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	config, _, _, err := runconfig.Parse([]string{"-u", "1002", unitTestImageID, "echo test"}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	createTestContainer(eng, config, t)
-}
-
 func TestCommit(t *testing.T) {
 	eng := NewTestEngine(t)
 	defer mkDaemonFromEngine(eng, t).Nuke()
@@ -114,7 +100,6 @@
 
 func TestRestartKillWait(t *testing.T) {
 	eng := NewTestEngine(t)
-	srv := mkServerFromEngine(eng, t)
 	runtime := mkDaemonFromEngine(eng, t)
 	defer runtime.Nuke()
 
@@ -152,9 +137,8 @@
 	}
 
 	eng = newTestEngine(t, false, runtime.Config().Root)
-	srv = mkServerFromEngine(eng, t)
 
-	job = srv.Eng.Job("containers")
+	job = eng.Job("containers")
 	job.SetenvBool("all", true)
 	outs, err = job.Stdout.AddListTable()
 	if err != nil {
@@ -169,7 +153,7 @@
 	}
 
 	setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() {
-		job = srv.Eng.Job("wait", outs.Data[0].Get("Id"))
+		job = eng.Job("wait", outs.Data[0].Get("Id"))
 		if err := job.Run(); err != nil {
 			t.Fatal(err)
 		}
@@ -178,7 +162,6 @@
 
 func TestCreateStartRestartStopStartKillRm(t *testing.T) {
 	eng := NewTestEngine(t)
-	srv := mkServerFromEngine(eng, t)
 	defer mkDaemonFromEngine(eng, t).Nuke()
 
 	config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
@@ -188,7 +171,7 @@
 
 	id := createTestContainer(eng, config, t)
 
-	job := srv.Eng.Job("containers")
+	job := eng.Job("containers")
 	job.SetenvBool("all", true)
 	outs, err := job.Stdout.AddListTable()
 	if err != nil {
@@ -235,13 +218,13 @@
 	}
 
 	// FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty")
-	job = eng.Job("container_delete", id)
+	job = eng.Job("delete", id)
 	job.SetenvBool("removeVolume", true)
 	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
 
-	job = srv.Eng.Job("containers")
+	job = eng.Job("containers")
 	job.SetenvBool("all", true)
 	outs, err = job.Stdout.AddListTable()
 	if err != nil {
@@ -271,92 +254,6 @@
 	}
 }
 
-func TestRmi(t *testing.T) {
-	eng := NewTestEngine(t)
-	srv := mkServerFromEngine(eng, t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	initialImages := getAllImages(eng, t)
-
-	config, hostConfig, _, err := runconfig.Parse([]string{unitTestImageID, "echo", "test"}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	containerID := createTestContainer(eng, config, t)
-
-	//To remove
-	job := eng.Job("start", containerID)
-	if err := job.ImportEnv(hostConfig); err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := eng.Job("wait", containerID).Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	job = eng.Job("commit", containerID)
-	job.Setenv("repo", "test")
-	var outputBuffer = bytes.NewBuffer(nil)
-	job.Stdout.Add(outputBuffer)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := eng.Job("tag", engine.Tail(outputBuffer, 1), "test", "0.1").Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	containerID = createTestContainer(eng, config, t)
-
-	//To remove
-	job = eng.Job("start", containerID)
-	if err := job.ImportEnv(hostConfig); err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := eng.Job("wait", containerID).Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	job = eng.Job("commit", containerID)
-	job.Setenv("repo", "test")
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	images := getAllImages(eng, t)
-
-	if images.Len()-initialImages.Len() != 2 {
-		t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len())
-	}
-
-	if err = srv.DeleteImage(engine.Tail(outputBuffer, 1), engine.NewTable("", 0), true, false, false); err != nil {
-		t.Fatal(err)
-	}
-
-	images = getAllImages(eng, t)
-
-	if images.Len()-initialImages.Len() != 1 {
-		t.Fatalf("Expected 1 new image, found %d.", images.Len()-initialImages.Len())
-	}
-
-	for _, image := range images.Data {
-		if strings.Contains(unitTestImageID, image.Get("Id")) {
-			continue
-		}
-		if image.GetList("RepoTags")[0] == "<none>:<none>" {
-			t.Fatalf("Expected tagged image, got untagged one.")
-		}
-	}
-}
-
 func TestImagesFilter(t *testing.T) {
 	eng := NewTestEngine(t)
 	defer nuke(mkDaemonFromEngine(eng, t))
@@ -397,170 +294,3 @@
 		t.Fatal("incorrect number of matches returned")
 	}
 }
-
-func TestListContainers(t *testing.T) {
-	eng := NewTestEngine(t)
-	srv := mkServerFromEngine(eng, t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	config := runconfig.Config{
-		Image:     unitTestImageID,
-		Cmd:       []string{"/bin/sh", "-c", "cat"},
-		OpenStdin: true,
-	}
-
-	firstID := createTestContainer(eng, &config, t)
-	secondID := createTestContainer(eng, &config, t)
-	thirdID := createTestContainer(eng, &config, t)
-	fourthID := createTestContainer(eng, &config, t)
-	defer func() {
-		containerKill(eng, firstID, t)
-		containerKill(eng, secondID, t)
-		containerKill(eng, fourthID, t)
-		containerWait(eng, firstID, t)
-		containerWait(eng, secondID, t)
-		containerWait(eng, fourthID, t)
-	}()
-
-	startContainer(eng, firstID, t)
-	startContainer(eng, secondID, t)
-	startContainer(eng, fourthID, t)
-
-	// all
-	if !assertContainerList(srv, true, -1, "", "", []string{fourthID, thirdID, secondID, firstID}) {
-		t.Error("Container list is not in the correct order")
-	}
-
-	// running
-	if !assertContainerList(srv, false, -1, "", "", []string{fourthID, secondID, firstID}) {
-		t.Error("Container list is not in the correct order")
-	}
-
-	// from here 'all' flag is ignored
-
-	// limit
-	expected := []string{fourthID, thirdID}
-	if !assertContainerList(srv, true, 2, "", "", expected) ||
-		!assertContainerList(srv, false, 2, "", "", expected) {
-		t.Error("Container list is not in the correct order")
-	}
-
-	// since
-	expected = []string{fourthID, thirdID, secondID}
-	if !assertContainerList(srv, true, -1, firstID, "", expected) ||
-		!assertContainerList(srv, false, -1, firstID, "", expected) {
-		t.Error("Container list is not in the correct order")
-	}
-
-	// before
-	expected = []string{secondID, firstID}
-	if !assertContainerList(srv, true, -1, "", thirdID, expected) ||
-		!assertContainerList(srv, false, -1, "", thirdID, expected) {
-		t.Error("Container list is not in the correct order")
-	}
-
-	// since & before
-	expected = []string{thirdID, secondID}
-	if !assertContainerList(srv, true, -1, firstID, fourthID, expected) ||
-		!assertContainerList(srv, false, -1, firstID, fourthID, expected) {
-		t.Error("Container list is not in the correct order")
-	}
-
-	// since & limit
-	expected = []string{fourthID, thirdID}
-	if !assertContainerList(srv, true, 2, firstID, "", expected) ||
-		!assertContainerList(srv, false, 2, firstID, "", expected) {
-		t.Error("Container list is not in the correct order")
-	}
-
-	// before & limit
-	expected = []string{thirdID}
-	if !assertContainerList(srv, true, 1, "", fourthID, expected) ||
-		!assertContainerList(srv, false, 1, "", fourthID, expected) {
-		t.Error("Container list is not in the correct order")
-	}
-
-	// since & before & limit
-	expected = []string{thirdID}
-	if !assertContainerList(srv, true, 1, firstID, fourthID, expected) ||
-		!assertContainerList(srv, false, 1, firstID, fourthID, expected) {
-		t.Error("Container list is not in the correct order")
-	}
-}
-
-func assertContainerList(srv *server.Server, all bool, limit int, since, before string, expected []string) bool {
-	job := srv.Eng.Job("containers")
-	job.SetenvBool("all", all)
-	job.SetenvInt("limit", limit)
-	job.Setenv("since", since)
-	job.Setenv("before", before)
-	outs, err := job.Stdout.AddListTable()
-	if err != nil {
-		return false
-	}
-	if err := job.Run(); err != nil {
-		return false
-	}
-	if len(outs.Data) != len(expected) {
-		return false
-	}
-	for i := 0; i < len(outs.Data); i++ {
-		if outs.Data[i].Get("Id") != expected[i] {
-			return false
-		}
-	}
-	return true
-}
-
-// Regression test for being able to untag an image with an existing
-// container
-func TestDeleteTagWithExistingContainers(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer nuke(mkDaemonFromEngine(eng, t))
-
-	srv := mkServerFromEngine(eng, t)
-
-	// Tag the image
-	if err := eng.Job("tag", unitTestImageID, "utest", "tag1").Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create a container from the image
-	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	id := createNamedTestContainer(eng, config, t, "testingtags")
-	if id == "" {
-		t.Fatal("No id returned")
-	}
-
-	job := srv.Eng.Job("containers")
-	job.SetenvBool("all", true)
-	outs, err := job.Stdout.AddListTable()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if len(outs.Data) != 1 {
-		t.Fatalf("Expected 1 container got %d", len(outs.Data))
-	}
-
-	// Try to remove the tag
-	imgs := engine.NewTable("", 0)
-	if err := srv.DeleteImage("utest:tag1", imgs, true, false, false); err != nil {
-		t.Fatal(err)
-	}
-
-	if len(imgs.Data) != 1 {
-		t.Fatalf("Should only have deleted one untag %d", len(imgs.Data))
-	}
-
-	if untag := imgs.Data[0].Get("Untagged"); untag != "utest:tag1" {
-		t.Fatalf("Expected %s got %s", unitTestImageID, untag)
-	}
-}
diff --git a/integration/utils_test.go b/integration/utils_test.go
index 7be7f13..7962886 100644
--- a/integration/utils_test.go
+++ b/integration/utils_test.go
@@ -13,14 +13,14 @@
 	"testing"
 	"time"
 
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 
-	"github.com/dotcloud/docker/builtins"
-	"github.com/dotcloud/docker/daemon"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/server"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/builtins"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
 // This file contains utility functions for docker's unit test suite.
@@ -29,7 +29,7 @@
 
 // Create a temporary daemon suitable for unit testing.
 // Call t.Fatal() at the first error.
-func mkDaemon(f utils.Fataler) *daemon.Daemon {
+func mkDaemon(f log.Fataler) *daemon.Daemon {
 	eng := newTestEngine(f, false, "")
 	return mkDaemonFromEngine(eng, f)
 	// FIXME:
@@ -38,7 +38,7 @@
 	// [...]
 }
 
-func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler, name string) (shortId string) {
+func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler, name string) (shortId string) {
 	job := eng.Job("create", name)
 	if err := job.ImportEnv(config); err != nil {
 		f.Fatal(err)
@@ -51,23 +51,23 @@
 	return engine.Tail(outputBuffer, 1)
 }
 
-func createTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler) (shortId string) {
+func createTestContainer(eng *engine.Engine, config *runconfig.Config, f log.Fataler) (shortId string) {
 	return createNamedTestContainer(eng, config, f, "")
 }
 
-func startContainer(eng *engine.Engine, id string, t utils.Fataler) {
+func startContainer(eng *engine.Engine, id string, t log.Fataler) {
 	job := eng.Job("start", id)
 	if err := job.Run(); err != nil {
 		t.Fatal(err)
 	}
 }
 
-func containerRun(eng *engine.Engine, id string, t utils.Fataler) {
+func containerRun(eng *engine.Engine, id string, t log.Fataler) {
 	startContainer(eng, id, t)
 	containerWait(eng, id, t)
 }
 
-func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool {
+func containerFileExists(eng *engine.Engine, id, dir string, t log.Fataler) bool {
 	c := getContainer(eng, id, t)
 	if err := c.Mount(); err != nil {
 		t.Fatal(err)
@@ -82,7 +82,7 @@
 	return true
 }
 
-func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCloser, io.ReadCloser) {
+func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteCloser, io.ReadCloser) {
 	c := getContainer(eng, id, t)
 	i, err := c.StdinPipe()
 	if err != nil {
@@ -95,31 +95,31 @@
 	return i, o
 }
 
-func containerWait(eng *engine.Engine, id string, t utils.Fataler) int {
+func containerWait(eng *engine.Engine, id string, t log.Fataler) int {
 	ex, _ := getContainer(eng, id, t).State.WaitStop(-1 * time.Second)
 	return ex
 }
 
-func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error {
+func containerWaitTimeout(eng *engine.Engine, id string, t log.Fataler) error {
 	_, err := getContainer(eng, id, t).State.WaitStop(500 * time.Millisecond)
 	return err
 }
 
-func containerKill(eng *engine.Engine, id string, t utils.Fataler) {
+func containerKill(eng *engine.Engine, id string, t log.Fataler) {
 	if err := eng.Job("kill", id).Run(); err != nil {
 		t.Fatal(err)
 	}
 }
 
-func containerRunning(eng *engine.Engine, id string, t utils.Fataler) bool {
+func containerRunning(eng *engine.Engine, id string, t log.Fataler) bool {
 	return getContainer(eng, id, t).State.IsRunning()
 }
 
-func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) {
+func containerAssertExists(eng *engine.Engine, id string, t log.Fataler) {
 	getContainer(eng, id, t)
 }
 
-func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) {
+func containerAssertNotExists(eng *engine.Engine, id string, t log.Fataler) {
 	daemon := mkDaemonFromEngine(eng, t)
 	if c := daemon.Get(id); c != nil {
 		t.Fatal(fmt.Errorf("Container %s should not exist", id))
@@ -128,7 +128,7 @@
 
 // assertHttpNotError expects the given response to not have an error.
 // Otherwise it causes the test to fail.
-func assertHttpNotError(r *httptest.ResponseRecorder, t utils.Fataler) {
+func assertHttpNotError(r *httptest.ResponseRecorder, t log.Fataler) {
 	// Non-error http status are [200, 400)
 	if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
 		t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
@@ -137,14 +137,14 @@
 
 // assertHttpError expects the given response to have an error.
 // Otherwise it causes the test to fail.
-func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) {
+func assertHttpError(r *httptest.ResponseRecorder, t log.Fataler) {
 	// Non-error http status are [200, 400)
 	if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) {
 		t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code))
 	}
 }
 
-func getContainer(eng *engine.Engine, id string, t utils.Fataler) *daemon.Container {
+func getContainer(eng *engine.Engine, id string, t log.Fataler) *daemon.Container {
 	daemon := mkDaemonFromEngine(eng, t)
 	c := daemon.Get(id)
 	if c == nil {
@@ -153,19 +153,7 @@
 	return c
 }
 
-func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *server.Server {
-	iSrv := eng.Hack_GetGlobalVar("httpapi.server")
-	if iSrv == nil {
-		panic("Legacy server field not set in engine")
-	}
-	srv, ok := iSrv.(*server.Server)
-	if !ok {
-		panic("Legacy server field in engine does not cast to *server.Server")
-	}
-	return srv
-}
-
-func mkDaemonFromEngine(eng *engine.Engine, t utils.Fataler) *daemon.Daemon {
+func mkDaemonFromEngine(eng *engine.Engine, t log.Fataler) *daemon.Daemon {
 	iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon")
 	if iDaemon == nil {
 		panic("Legacy daemon field not set in engine")
@@ -177,7 +165,7 @@
 	return daemon
 }
 
-func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engine {
+func newTestEngine(t log.Fataler, autorestart bool, root string) *engine.Engine {
 	if root == "" {
 		if dir, err := newTestDirectory(unitTestStoreBase); err != nil {
 			t.Fatal(err)
@@ -191,19 +179,25 @@
 	// Load default plugins
 	builtins.Register(eng)
 	// (This is manually copied and modified from main() until we have a more generic plugin system)
-	job := eng.Job("initserver")
-	job.Setenv("Root", root)
-	job.SetenvBool("AutoRestart", autorestart)
-	job.Setenv("ExecDriver", "native")
-	// TestGetEnabledCors and TestOptionsRoute require EnableCors=true
-	job.SetenvBool("EnableCors", true)
-	if err := job.Run(); err != nil {
+	cfg := &daemon.Config{
+		Root:        root,
+		AutoRestart: autorestart,
+		ExecDriver:  "native",
+		// Either InterContainerCommunication or EnableIptables must be set,
+		// otherwise NewDaemon will fail because of conflicting settings.
+		InterContainerCommunication: true,
+	}
+	d, err := daemon.NewDaemon(cfg, eng)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := d.Install(eng); err != nil {
 		t.Fatal(err)
 	}
 	return eng
 }
 
-func NewTestEngine(t utils.Fataler) *engine.Engine {
+func NewTestEngine(t log.Fataler) *engine.Engine {
 	return newTestEngine(t, false, "")
 }
 
diff --git a/integration/z_final_test.go b/integration/z_final_test.go
index 6065230..ad1eb43 100644
--- a/integration/z_final_test.go
+++ b/integration/z_final_test.go
@@ -1,7 +1,7 @@
 package docker
 
 import (
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/utils"
 	"runtime"
 	"testing"
 )
diff --git a/links/links.go b/links/links.go
index 7665a06..d2d6993 100644
--- a/links/links.go
+++ b/links/links.go
@@ -2,8 +2,8 @@
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/nat"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
 	"path"
 	"strings"
 )
@@ -49,7 +49,7 @@
 
 func (l *Link) ToEnv() []string {
 	env := []string{}
-	alias := strings.ToUpper(l.Alias())
+	alias := strings.Replace(strings.ToUpper(l.Alias()), "-", "_", -1)
 
 	if p := l.getDefaultPort(); p != nil {
 		env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port()))
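With this change, an alias such as "docker-1" yields DOCKER_1_* environment variables instead of the invalid DOCKER-1_*. A minimal standalone sketch of the transformation (not part of the patch; standard library only):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirror the patched line: upper-case the alias, then replace
	// dashes, which are not valid in environment variable names.
	alias := strings.Replace(strings.ToUpper("docker-1"), "-", "_", -1)
	fmt.Println(alias + "_PORT") // DOCKER_1_PORT
}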
diff --git a/links/links_test.go b/links/links_test.go
index e66f9bf..c26559e 100644
--- a/links/links_test.go
+++ b/links/links_test.go
@@ -1,11 +1,41 @@
 package links
 
 import (
-	"github.com/dotcloud/docker/nat"
+	"github.com/docker/docker/nat"
 	"strings"
 	"testing"
 )
 
+func TestLinkNaming(t *testing.T) {
+	ports := make(nat.PortSet)
+	ports[nat.Port("6379/tcp")] = struct{}{}
+
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rawEnv := link.ToEnv()
+	env := make(map[string]string, len(rawEnv))
+	for _, e := range rawEnv {
+		parts := strings.Split(e, "=")
+		if len(parts) != 2 {
+			t.FailNow()
+		}
+		env[parts[0]] = parts[1]
+	}
+
+	value, ok := env["DOCKER_1_PORT"]
+
+	if !ok {
+		t.Fatalf("DOCKER_1_PORT not found in env")
+	}
+
+	if value != "tcp://172.0.17.2:6379" {
+		t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", value)
+	}
+}
+
 func TestLinkNew(t *testing.T) {
 	ports := make(nat.PortSet)
 	ports[nat.Port("6379/tcp")] = struct{}{}
diff --git a/nat/nat.go b/nat/nat.go
index 31633dd..a2ad908 100644
--- a/nat/nat.go
+++ b/nat/nat.go
@@ -5,10 +5,11 @@
 
 import (
 	"fmt"
+	"net"
 	"strconv"
 	"strings"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/pkg/parsers"
 )
 
 const (
@@ -68,6 +69,10 @@
 		return "", ""
 	}
 	if l == 1 {
+		if rawPort == "" {
+			return "", "" // ""/tcp is not valid, ever
+		}
+
 		return "tcp", rawPort
 	}
 	return parts[1], parts[0]
@@ -103,7 +108,7 @@
 			rawPort = fmt.Sprintf(":%s", rawPort)
 		}
 
-		parts, err := utils.PartParser(PortSpecTemplate, rawPort)
+		parts, err := parsers.PartParser(PortSpecTemplate, rawPort)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -114,6 +119,9 @@
 			hostPort      = parts["hostPort"]
 		)
 
+		if rawIp != "" && net.ParseIP(rawIp) == nil {
+			return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIp)
+		}
 		if containerPort == "" {
 			return nil, nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
 		}
@@ -123,6 +131,7 @@
 		if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil {
 			return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
 		}
+
 		if !validateProto(proto) {
 			return nil, nil, fmt.Errorf("Invalid proto: %s", proto)
 		}
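Taken together, the nat changes default bare ports to tcp, reject an empty port spec outright, and fail fast when a hostname appears where an IP is required. A standalone sketch against the patched package (the commented output is inferred from the code above):

package main

import (
	"fmt"

	"github.com/docker/docker/nat"
)

func main() {
	proto, port := nat.SplitProtoPort("1234") // a bare port defaults to tcp
	fmt.Println(proto, port)                  // tcp 1234

	proto, port = nat.SplitProtoPort("") // ""/tcp is never valid
	fmt.Printf("%q %q\n", proto, port)   // "" ""

	// Host addresses must parse as IPs now; hostnames are rejected.
	if _, _, err := nat.ParsePortSpecs([]string{"localhost:80:80/tcp"}); err != nil {
		fmt.Println(err) // Invalid ip address: localhost
	}
}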
diff --git a/nat/nat_test.go b/nat/nat_test.go
new file mode 100644
index 0000000..80a6169
--- /dev/null
+++ b/nat/nat_test.go
@@ -0,0 +1,189 @@
+package nat
+
+import (
+	"testing"
+)
+
+func TestParsePort(t *testing.T) {
+	var (
+		p   int
+		err error
+	)
+
+	p, err = ParsePort("1234")
+
+	if err != nil || p != 1234 {
+		t.Fatal("Parsing '1234' did not succeed")
+	}
+
+	// FIXME currently this is a valid port. I don't think it should be.
+	// I'm leaving this test commented out until we make a decision.
+	// - erikh
+
+	/*
+		p, err = ParsePort("0123")
+
+		if err != nil {
+		    t.Fatal("Successfully parsed port '0123' to '123'")
+		}
+	*/
+
+	p, err = ParsePort("asdf")
+
+	if err == nil || p != 0 {
+		t.Fatal("Parsing port 'asdf' succeeded")
+	}
+
+	p, err = ParsePort("1asdf")
+
+	if err == nil || p != 0 {
+		t.Fatal("Parsing port '1asdf' succeeded")
+	}
+}
+
+func TestPort(t *testing.T) {
+	p := NewPort("tcp", "1234")
+
+	if string(p) != "1234/tcp" {
+		t.Fatal("tcp, 1234 did not result in the string 1234/tcp")
+	}
+
+	if p.Proto() != "tcp" {
+		t.Fatal("protocol was not tcp")
+	}
+
+	if p.Port() != "1234" {
+		t.Fatal("port string value was not 1234")
+	}
+
+	if p.Int() != 1234 {
+		t.Fatal("port int value was not 1234")
+	}
+}
+
+func TestSplitProtoPort(t *testing.T) {
+	var (
+		proto string
+		port  string
+	)
+
+	proto, port = SplitProtoPort("1234/tcp")
+
+	if proto != "tcp" || port != "1234" {
+		t.Fatal("Could not split 1234/tcp properly")
+	}
+
+	proto, port = SplitProtoPort("")
+
+	if proto != "" || port != "" {
+		t.Fatal("parsing an empty string yielded surprising results")
+	}
+
+	proto, port = SplitProtoPort("1234")
+
+	if proto != "tcp" || port != "1234" {
+		t.Fatal("tcp is not the default protocol for portspec '1234'")
+	}
+}
+
+func TestParsePortSpecs(t *testing.T) {
+	var (
+		portMap    map[Port]struct{}
+		bindingMap map[Port][]PortBinding
+		err        error
+	)
+
+	portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"})
+
+	if err != nil {
+		t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error())
+	}
+
+	if _, ok := portMap[Port("1234/tcp")]; !ok {
+		t.Fatal("1234/tcp was not parsed properly")
+	}
+
+	if _, ok := portMap[Port("2345/udp")]; !ok {
+		t.Fatal("2345/udp was not parsed properly")
+	}
+
+	for portspec, bindings := range bindingMap {
+		if len(bindings) != 1 {
+			t.Fatalf("%s should have exactly one binding", portspec)
+		}
+
+		if bindings[0].HostIp != "" {
+			t.Fatalf("HostIp should not be set for %s", portspec)
+		}
+
+		if bindings[0].HostPort != "" {
+			t.Fatalf("HostPort should not be set for %s", portspec)
+		}
+	}
+
+	portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"})
+
+	if err != nil {
+		t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error())
+	}
+
+	if _, ok := portMap[Port("1234/tcp")]; !ok {
+		t.Fatal("1234/tcp was not parsed properly")
+	}
+
+	if _, ok := portMap[Port("2345/udp")]; !ok {
+		t.Fatal("2345/udp was not parsed properly")
+	}
+
+	for portspec, bindings := range bindingMap {
+		_, port := SplitProtoPort(string(portspec))
+
+		if len(bindings) != 1 {
+			t.Fatalf("%s should have exactly one binding", portspec)
+		}
+
+		if bindings[0].HostIp != "" {
+			t.Fatalf("HostIp should not be set for %s", portspec)
+		}
+
+		if bindings[0].HostPort != port {
+			t.Fatalf("HostPort should be %s for %s", port, portspec)
+		}
+	}
+
+	portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"})
+
+	if err != nil {
+		t.Fatalf("Error while processing ParsePortSpecs: %s", err.Error())
+	}
+
+	if _, ok := portMap[Port("1234/tcp")]; !ok {
+		t.Fatal("1234/tcp was not parsed properly")
+	}
+
+	if _, ok := portMap[Port("2345/udp")]; !ok {
+		t.Fatal("2345/udp was not parsed properly")
+	}
+
+	for portspec, bindings := range bindingMap {
+		_, port := SplitProtoPort(string(portspec))
+
+		if len(bindings) != 1 {
+			t.Fatalf("%s should have exactly one binding", portspec)
+		}
+
+		if bindings[0].HostIp != "0.0.0.0" {
+			t.Fatalf("HostIp is not 0.0.0.0 for %s", portspec)
+		}
+
+		if bindings[0].HostPort != port {
+			t.Fatalf("HostPort should be %s for %s", port, portspec)
+		}
+	}
+
+	_, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"})
+
+	if err == nil {
+		t.Fatal("Received no error while trying to parse a hostname instead of ip")
+	}
+}
diff --git a/opts/ip.go b/opts/ip.go
new file mode 100644
index 0000000..f8a493e
--- /dev/null
+++ b/opts/ip.go
@@ -0,0 +1,31 @@
+package opts
+
+import (
+	"fmt"
+	"net"
+)
+
+type IpOpt struct {
+	*net.IP
+}
+
+func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt {
+	o := &IpOpt{
+		IP: ref,
+	}
+	o.Set(defaultVal)
+	return o
+}
+
+func (o *IpOpt) Set(val string) error {
+	ip := net.ParseIP(val)
+	if ip == nil {
+		return fmt.Errorf("%s is not an ip address", val)
+	}
+	*o.IP = ip
+	return nil
+}
+
+func (o *IpOpt) String() string {
+	return (*o.IP).String()
+}
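A short usage sketch for the new option type (standalone, assuming only the package as added above):

package main

import (
	"fmt"
	"net"

	"github.com/docker/docker/opts"
)

func main() {
	var ip net.IP
	o := opts.NewIpOpt(&ip, "127.0.0.1") // the default value seeds ip
	if err := o.Set("not-an-ip"); err != nil {
		fmt.Println(err) // not-an-ip is not an ip address
	}
	o.Set("10.0.0.1")
	fmt.Println(ip) // 10.0.0.1
}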
diff --git a/opts/opts.go b/opts/opts.go
index 67f1c8f..65806f3 100644
--- a/opts/opts.go
+++ b/opts/opts.go
@@ -2,27 +2,57 @@
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/utils"
+	"net"
 	"os"
 	"path/filepath"
 	"regexp"
 	"strings"
+
+	"github.com/docker/docker/api"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
 )
 
+func ListVar(values *[]string, names []string, usage string) {
+	flag.Var(newListOptsRef(values, nil), names, usage)
+}
+
+func HostListVar(values *[]string, names []string, usage string) {
+	flag.Var(newListOptsRef(values, api.ValidateHost), names, usage)
+}
+
+func IPListVar(values *[]string, names []string, usage string) {
+	flag.Var(newListOptsRef(values, ValidateIPAddress), names, usage)
+}
+
+func DnsSearchListVar(values *[]string, names []string, usage string) {
+	flag.Var(newListOptsRef(values, ValidateDnsSearch), names, usage)
+}
+
+func IPVar(value *net.IP, names []string, defaultValue, usage string) {
+	flag.Var(NewIpOpt(value, defaultValue), names, usage)
+}
+
 // ListOpts type
 type ListOpts struct {
-	values    []string
+	values    *[]string
 	validator ValidatorFctType
 }
 
 func NewListOpts(validator ValidatorFctType) ListOpts {
-	return ListOpts{
+	var values []string
+	return *newListOptsRef(&values, validator)
+}
+
+func newListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+	return &ListOpts{
+		values:    values,
 		validator: validator,
 	}
 }
 
 func (opts *ListOpts) String() string {
-	return fmt.Sprintf("%v", []string(opts.values))
+	return fmt.Sprintf("%v", []string((*opts.values)))
 }
 
 // Set validates the input value if needed and adds it to the
@@ -35,15 +65,15 @@
 		}
 		value = v
 	}
-	opts.values = append(opts.values, value)
+	(*opts.values) = append((*opts.values), value)
 	return nil
 }
 
 // Delete removes the given element from the slice.
 func (opts *ListOpts) Delete(key string) {
-	for i, k := range opts.values {
+	for i, k := range *opts.values {
 		if k == key {
-			opts.values = append(opts.values[:i], opts.values[i+1:]...)
+			(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
 			return
 		}
 	}
@@ -54,7 +84,7 @@
 // FIXME: can we remove this?
 func (opts *ListOpts) GetMap() map[string]struct{} {
 	ret := make(map[string]struct{})
-	for _, k := range opts.values {
+	for _, k := range *opts.values {
 		ret[k] = struct{}{}
 	}
 	return ret
@@ -63,12 +93,12 @@
 // GetAll returns the values' slice.
 // FIXME: Can we remove this?
 func (opts *ListOpts) GetAll() []string {
-	return opts.values
+	return (*opts.values)
 }
 
 // Get checks the existence of the given key.
 func (opts *ListOpts) Get(key string) bool {
-	for _, k := range opts.values {
+	for _, k := range *opts.values {
 		if k == key {
 			return true
 		}
@@ -78,21 +108,24 @@
 
 // Len returns the number of elements in the slice.
 func (opts *ListOpts) Len() int {
-	return len(opts.values)
+	return len((*opts.values))
 }
 
 // Validators
 type ValidatorFctType func(val string) (string, error)
 
 func ValidateAttach(val string) (string, error) {
-	if val != "stdin" && val != "stdout" && val != "stderr" {
-		return val, fmt.Errorf("Unsupported stream name: %s", val)
+	s := strings.ToLower(val)
+	for _, str := range []string{"stdin", "stdout", "stderr"} {
+		if s == str {
+			return s, nil
+		}
 	}
-	return val, nil
+	return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR.")
 }
 
 func ValidateLink(val string) (string, error) {
-	if _, err := utils.PartParser("name:alias", val); err != nil {
+	if _, err := parsers.PartParser("name:alias", val); err != nil {
 		return val, err
 	}
 	return val, nil
@@ -128,16 +161,24 @@
 	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
 }
 
-func ValidateIp4Address(val string) (string, error) {
-	re := regexp.MustCompile(`^(([0-9]+\.){3}([0-9]+))\s*$`)
-	var ns = re.FindSubmatch([]byte(val))
-	if len(ns) > 0 {
-		return string(ns[1]), nil
+func ValidateIPAddress(val string) (string, error) {
+	var ip = net.ParseIP(strings.TrimSpace(val))
+	if ip != nil {
+		return ip.String(), nil
 	}
-	return "", fmt.Errorf("%s is not an ip4 address", val)
+	return "", fmt.Errorf("%s is not an ip address", val)
 }
 
-func ValidateDomain(val string) (string, error) {
+// ValidateDnsSearch validates a domain for the resolvconf search configuration.
+// A zero-length domain is represented by ".".
+func ValidateDnsSearch(val string) (string, error) {
+	if val = strings.Trim(val, " "); val == "." {
+		return val, nil
+	}
+	return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
 	alpha := regexp.MustCompile(`[a-zA-Z]`)
 	if alpha.FindString(val) == "" {
 		return "", fmt.Errorf("%s is not a valid domain", val)
diff --git a/opts/opts_test.go b/opts/opts_test.go
index 299cbfe..09b5aa7 100644
--- a/opts/opts_test.go
+++ b/opts/opts_test.go
@@ -4,27 +4,38 @@
 	"testing"
 )
 
-func TestValidateIP4(t *testing.T) {
-	if ret, err := ValidateIp4Address(`1.2.3.4`); err != nil || ret == "" {
-		t.Fatalf("ValidateIp4Address(`1.2.3.4`) got %s %s", ret, err)
+func TestValidateIPAddress(t *testing.T) {
+	if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" {
+		t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err)
 	}
 
-	if ret, err := ValidateIp4Address(`127.0.0.1`); err != nil || ret == "" {
-		t.Fatalf("ValidateIp4Address(`127.0.0.1`) got %s %s", ret, err)
+	if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" {
+		t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err)
 	}
 
-	if ret, err := ValidateIp4Address(`127`); err == nil || ret != "" {
-		t.Fatalf("ValidateIp4Address(`127`) got %s %s", ret, err)
+	if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" {
+		t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err)
 	}
 
-	if ret, err := ValidateIp4Address(`random invalid string`); err == nil || ret != "" {
-		t.Fatalf("ValidateIp4Address(`random invalid string`) got %s %s", ret, err)
+	if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" {
+		t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err)
+	}
+
+	if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" {
+		t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err)
 	}
 
 }
 
-func TestValidateDomain(t *testing.T) {
+func TestListOpts(t *testing.T) {
+	o := NewListOpts(nil)
+	o.Set("foo")
+	o.String()
+}
+
+func TestValidateDnsSearch(t *testing.T) {
 	valid := []string{
+		`.`,
 		`a`,
 		`a.`,
 		`1.foo`,
@@ -49,7 +60,8 @@
 
 	invalid := []string{
 		``,
-		`.`,
+		` `,
+		`  `,
 		`17`,
 		`17.`,
 		`.17`,
@@ -65,14 +77,14 @@
 	}
 
 	for _, domain := range valid {
-		if ret, err := ValidateDomain(domain); err != nil || ret == "" {
-			t.Fatalf("ValidateDomain(`"+domain+"`) got %s %s", ret, err)
+		if ret, err := ValidateDnsSearch(domain); err != nil || ret == "" {
+			t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err)
 		}
 	}
 
 	for _, domain := range invalid {
-		if ret, err := ValidateDomain(domain); err == nil || ret != "" {
-			t.Fatalf("ValidateDomain(`"+domain+"`) got %s %s", ret, err)
+		if ret, err := ValidateDnsSearch(domain); err == nil || ret != "" {
+			t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err)
 		}
 	}
 }
diff --git a/pkg/broadcastwriter/broadcastwriter.go b/pkg/broadcastwriter/broadcastwriter.go
new file mode 100644
index 0000000..3bf1272
--- /dev/null
+++ b/pkg/broadcastwriter/broadcastwriter.go
@@ -0,0 +1,93 @@
+package broadcastwriter
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/log"
+)
+
+// BroadcastWriter accumulates multiple io.WriteCloser instances, grouped by stream.
+type BroadcastWriter struct {
+	sync.Mutex
+	buf     *bytes.Buffer
+	streams map[string](map[io.WriteCloser]struct{})
+}
+
+// AddWriter adds a new io.WriteCloser for the given stream.
+// If stream is "", all writes are passed through as-is. Otherwise every line
+// of input is wrapped in a serialized jsonlog.JSONLog before being written.
+func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) {
+	w.Lock()
+	if _, ok := w.streams[stream]; !ok {
+		w.streams[stream] = make(map[io.WriteCloser]struct{})
+	}
+	w.streams[stream][writer] = struct{}{}
+	w.Unlock()
+}
+
+// Write writes bytes to all writers. Failed writers will be evicted during
+// this call.
+func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
+	created := time.Now().UTC()
+	w.Lock()
+	if writers, ok := w.streams[""]; ok {
+		for sw := range writers {
+			if n, err := sw.Write(p); err != nil || n != len(p) {
+				// On error, evict the writer
+				delete(writers, sw)
+			}
+		}
+	}
+	w.buf.Write(p)
+	for {
+		line, err := w.buf.ReadString('\n')
+		if err != nil {
+			w.buf.Write([]byte(line))
+			break
+		}
+		for stream, writers := range w.streams {
+			if stream == "" {
+				continue
+			}
+			b, err := json.Marshal(jsonlog.JSONLog{Log: line, Stream: stream, Created: created})
+			if err != nil {
+				log.Errorf("Error making JSON log line: %s", err)
+				continue
+			}
+			b = append(b, '\n')
+			for sw := range writers {
+				if _, err := sw.Write(b); err != nil {
+					delete(writers, sw)
+				}
+			}
+		}
+	}
+	w.Unlock()
+	return len(p), nil
+}
+
+// Clean closes and removes all writers. The last chunk of data that is not
+// yet newline-terminated is retained in the buffer.
+func (w *BroadcastWriter) Clean() error {
+	w.Lock()
+	for _, writers := range w.streams {
+		for w := range writers {
+			w.Close()
+		}
+	}
+	w.streams = make(map[string](map[io.WriteCloser]struct{}))
+	w.Unlock()
+	return nil
+}
+
+func New() *BroadcastWriter {
+	return &BroadcastWriter{
+		streams: make(map[string](map[io.WriteCloser]struct{})),
+		buf:     bytes.NewBuffer(nil),
+	}
+}
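A usage sketch for the new package: a writer registered under "" receives the raw byte stream, while writers on a named stream receive line-buffered jsonlog.JSONLog records. nopCloser is a local helper for this sketch, not part of the package:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/broadcastwriter"
)

// nopCloser turns a bytes.Buffer into the io.WriteCloser AddWriter expects.
type nopCloser struct{ *bytes.Buffer }

func (nopCloser) Close() error { return nil }

func main() {
	w := broadcastwriter.New()
	raw := nopCloser{&bytes.Buffer{}}
	logged := nopCloser{&bytes.Buffer{}}
	w.AddWriter(raw, "")          // verbatim copy of everything written
	w.AddWriter(logged, "stdout") // complete lines, JSON-encoded
	w.Write([]byte("hello\n"))
	fmt.Print(raw.String())    // hello
	fmt.Print(logged.String()) // {"log":"hello\n","stream":"stdout","time":"..."}
}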
diff --git a/pkg/broadcastwriter/broadcastwriter_test.go b/pkg/broadcastwriter/broadcastwriter_test.go
new file mode 100644
index 0000000..62ca126
--- /dev/null
+++ b/pkg/broadcastwriter/broadcastwriter_test.go
@@ -0,0 +1,144 @@
+package broadcastwriter
+
+import (
+	"bytes"
+	"errors"
+
+	"testing"
+)
+
+type dummyWriter struct {
+	buffer      bytes.Buffer
+	failOnWrite bool
+}
+
+func (dw *dummyWriter) Write(p []byte) (n int, err error) {
+	if dw.failOnWrite {
+		return 0, errors.New("Fake fail")
+	}
+	return dw.buffer.Write(p)
+}
+
+func (dw *dummyWriter) String() string {
+	return dw.buffer.String()
+}
+
+func (dw *dummyWriter) Close() error {
+	return nil
+}
+
+func TestBroadcastWriter(t *testing.T) {
+	writer := New()
+
+	// Test 1: Both bufferA and bufferB should contain "foo"
+	bufferA := &dummyWriter{}
+	writer.AddWriter(bufferA, "")
+	bufferB := &dummyWriter{}
+	writer.AddWriter(bufferB, "")
+	writer.Write([]byte("foo"))
+
+	if bufferA.String() != "foo" {
+		t.Errorf("Buffer contains %v", bufferA.String())
+	}
+
+	if bufferB.String() != "foo" {
+		t.Errorf("Buffer contains %v", bufferB.String())
+	}
+
+	// Test2: bufferA and bufferB should contain "foobar",
+	// while bufferC should only contain "bar"
+	bufferC := &dummyWriter{}
+	writer.AddWriter(bufferC, "")
+	writer.Write([]byte("bar"))
+
+	if bufferA.String() != "foobar" {
+		t.Errorf("Buffer contains %v", bufferA.String())
+	}
+
+	if bufferB.String() != "foobar" {
+		t.Errorf("Buffer contains %v", bufferB.String())
+	}
+
+	if bufferC.String() != "bar" {
+		t.Errorf("Buffer contains %v", bufferC.String())
+	}
+
+	// Test3: Test eviction on failure
+	bufferA.failOnWrite = true
+	writer.Write([]byte("fail"))
+	if bufferA.String() != "foobar" {
+		t.Errorf("Buffer contains %v", bufferA.String())
+	}
+	if bufferC.String() != "barfail" {
+		t.Errorf("Buffer contains %v", bufferC.String())
+	}
+	// Even though we reset the flag, no more writes should go in there
+	bufferA.failOnWrite = false
+	writer.Write([]byte("test"))
+	if bufferA.String() != "foobar" {
+		t.Errorf("Buffer contains %v", bufferA.String())
+	}
+	if bufferC.String() != "barfailtest" {
+		t.Errorf("Buffer contains %v", bufferC.String())
+	}
+
+	writer.Clean()
+}
+
+type devNullCloser int
+
+func (d devNullCloser) Close() error {
+	return nil
+}
+
+func (d devNullCloser) Write(buf []byte) (int, error) {
+	return len(buf), nil
+}
+
+// This test checks for races. It is only useful when run with the race detector.
+func TestRaceBroadcastWriter(t *testing.T) {
+	writer := New()
+	c := make(chan bool)
+	go func() {
+		writer.AddWriter(devNullCloser(0), "")
+		c <- true
+	}()
+	writer.Write([]byte("hello"))
+	<-c
+}
+
+func BenchmarkBroadcastWriter(b *testing.B) {
+	writer := New()
+	setUpWriter := func() {
+		for i := 0; i < 100; i++ {
+			writer.AddWriter(devNullCloser(0), "stdout")
+			writer.AddWriter(devNullCloser(0), "stderr")
+			writer.AddWriter(devNullCloser(0), "")
+		}
+	}
+	testLine := "Line that thinks that it is log line from docker"
+	var buf bytes.Buffer
+	for i := 0; i < 100; i++ {
+		buf.Write([]byte(testLine + "\n"))
+	}
+	// line without eol
+	buf.Write([]byte(testLine))
+	testText := buf.Bytes()
+	b.SetBytes(int64(5 * len(testText)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		setUpWriter()
+		b.StartTimer()
+
+		for j := 0; j < 5; j++ {
+			if _, err := writer.Write(testText); err != nil {
+				b.Fatal(err)
+			}
+		}
+
+		b.StopTimer()
+		writer.Clean()
+		b.StartTimer()
+	}
+}
diff --git a/pkg/httputils/MAINTAINERS b/pkg/httputils/MAINTAINERS
new file mode 100644
index 0000000..6dde476
--- /dev/null
+++ b/pkg/httputils/MAINTAINERS
@@ -0,0 +1 @@
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
diff --git a/utils/resumablerequestreader.go b/pkg/httputils/resumablerequestreader.go
similarity index 84%
rename from utils/resumablerequestreader.go
rename to pkg/httputils/resumablerequestreader.go
index e01f4e6..71533d3 100644
--- a/utils/resumablerequestreader.go
+++ b/pkg/httputils/resumablerequestreader.go
@@ -1,10 +1,12 @@
-package utils
+package httputils
 
 import (
 	"fmt"
 	"io"
 	"net/http"
 	"time"
+
+	"github.com/docker/docker/pkg/log"
 )
 
 type resumableRequestReader struct {
@@ -24,6 +26,10 @@
 	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize}
 }
 
+func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
+	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse}
+}
+
 func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
 	if r.client == nil || r.request == nil {
 		return 0, fmt.Errorf("client and request can't be nil\n")
@@ -66,7 +72,7 @@
 		r.cleanUpResponse()
 	}
 	if err != nil && err != io.EOF {
-		Debugf("encountered error during pull and clearing it before resume: %s", err)
+		log.Infof("encountered error during pull and clearing it before resume: %s", err)
 		err = nil
 	}
 	return n, err
diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go
new file mode 100644
index 0000000..ecf4457
--- /dev/null
+++ b/pkg/jsonlog/jsonlog.go
@@ -0,0 +1,45 @@
+package jsonlog
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"time"
+)
+
+type JSONLog struct {
+	Log     string    `json:"log,omitempty"`
+	Stream  string    `json:"stream,omitempty"`
+	Created time.Time `json:"time"`
+}
+
+func (jl *JSONLog) Format(format string) (string, error) {
+	if format == "" {
+		return jl.Log, nil
+	}
+	if format == "json" {
+		m, err := json.Marshal(jl)
+		return string(m), err
+	}
+	return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil
+}
+
+func WriteLog(src io.Reader, dst io.WriteCloser, format string) error {
+	dec := json.NewDecoder(src)
+	for {
+		l := &JSONLog{}
+
+		if err := dec.Decode(l); err == io.EOF {
+			return nil
+		} else if err != nil {
+			log.Printf("Error streaming logs: %s", err)
+			return err
+		}
+		line, err := l.Format(format)
+		if err != nil {
+			return err
+		}
+		fmt.Fprintf(dst, "%s", line)
+	}
+}
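Format has three modes, sketched below: an empty format returns the raw line, "json" re-serializes the whole entry, and anything else is treated as a time layout for a timestamp prefix:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	l := &jsonlog.JSONLog{Log: "hello\n", Stream: "stdout", Created: time.Now().UTC()}
	for _, format := range []string{"", "json", time.RFC3339} {
		line, err := l.Format(format)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Println(line)
	}
}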
diff --git a/pkg/log/log.go b/pkg/log/log.go
new file mode 100644
index 0000000..53be6cf
--- /dev/null
+++ b/pkg/log/log.go
@@ -0,0 +1,83 @@
+package log
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strings"
+)
+
+type priority int
+
+const (
+	errorFormat = "[%s] %s:%d %s\n"
+	logFormat   = "[%s] %s\n"
+
+	fatal priority = iota
+	error
+	info
+	debug
+)
+
+// A common interface to access the Fatal method of
+// both testing.B and testing.T.
+type Fataler interface {
+	Fatal(args ...interface{})
+}
+
+func (p priority) String() string {
+	switch p {
+	case fatal:
+		return "fatal"
+	case error:
+		return "error"
+	case info:
+		return "info"
+	case debug:
+		return "debug"
+	}
+
+	return ""
+}
+
+// Debugf displays the message only if the DEBUG environment variable is set;
+// it does nothing otherwise. If Docker is in daemon mode, the debug info is
+// also sent on the socket.
+func Debugf(format string, a ...interface{}) {
+	if os.Getenv("DEBUG") != "" {
+		logf(os.Stderr, debug, format, a...)
+	}
+}
+
+func Infof(format string, a ...interface{}) {
+	logf(os.Stdout, info, format, a...)
+}
+
+func Errorf(format string, a ...interface{}) {
+	logf(os.Stderr, error, format, a...)
+}
+
+func Fatalf(format string, a ...interface{}) {
+	logf(os.Stderr, fatal, format, a...)
+	os.Exit(1)
+}
+
+func logf(stream io.Writer, level priority, format string, a ...interface{}) {
+	var prefix string
+
+	if level <= error || level == debug {
+		// Retrieve the stack infos
+		_, file, line, ok := runtime.Caller(2)
+		if !ok {
+			file = "<unknown>"
+			line = -1
+		} else {
+			file = file[strings.LastIndex(file, "/")+1:]
+		}
+		prefix = fmt.Sprintf(errorFormat, level.String(), file, line, format)
+	} else {
+		prefix = fmt.Sprintf(logFormat, level.String(), format)
+	}
+
+	fmt.Fprintf(stream, prefix, a...)
+}
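A call-site sketch; error- and debug-level messages additionally carry the file:line prefix gathered via runtime.Caller:

package main

import "github.com/docker/docker/pkg/log"

func main() {
	log.Infof("daemon listening on %s", "/var/run/docker.sock") // stdout: [info] ...
	log.Errorf("bind failed: %s", "address already in use")     // stderr: [error] main.go:7 ...
	log.Debugf("config loaded")                                 // printed only when DEBUG is set
}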
diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go
new file mode 100644
index 0000000..83ba5fd
--- /dev/null
+++ b/pkg/log/log_test.go
@@ -0,0 +1,37 @@
+package log
+
+import (
+	"bytes"
+	"regexp"
+
+	"testing"
+)
+
+func TestLogFatalf(t *testing.T) {
+	var output *bytes.Buffer
+
+	tests := []struct {
+		Level           priority
+		Format          string
+		Values          []interface{}
+		ExpectedPattern string
+	}{
+		{fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"},
+		{error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"},
+		{info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"},
+		{debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"},
+	}
+
+	for i, test := range tests {
+		output = &bytes.Buffer{}
+		logf(output, test.Level, test.Format, test.Values...)
+
+		expected := regexp.MustCompile(test.ExpectedPattern)
+		if !expected.MatchString(output.String()) {
+			t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s",
+				i,
+				expected.String(),
+				output.String())
+		}
+	}
+}
diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go
index 8af8ff9..2e766dd 100644
--- a/pkg/mflag/example/example.go
+++ b/pkg/mflag/example/example.go
@@ -3,7 +3,7 @@
 import (
 	"fmt"
 
-	flag "github.com/dotcloud/docker/pkg/mflag"
+	flag "github.com/docker/docker/pkg/mflag"
 )
 
 var (
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
index 52f786e..6e3f039 100644
--- a/pkg/mflag/flag.go
+++ b/pkg/mflag/flag.go
@@ -10,7 +10,7 @@
 	Define flags using flag.String(), Bool(), Int(), etc.
 
 	This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int.
-		import "flag /github.com/dotcloud/docker/pkg/mflag"
+		import "flag /github.com/docker/docker/pkg/mflag"
 		var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
 	If you like, you can bind the flag to a variable using the Var() functions.
 		var flagvar int
diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go
index 4c2222e..2aa6fda 100644
--- a/pkg/mflag/flag_test.go
+++ b/pkg/mflag/flag_test.go
@@ -7,7 +7,7 @@
 import (
 	"bytes"
 	"fmt"
-	. "github.com/dotcloud/docker/pkg/mflag"
+	. "github.com/docker/docker/pkg/mflag"
 	"os"
 	"sort"
 	"strings"
diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go
index 19c882f..0bb47d8 100644
--- a/pkg/mount/flags_linux.go
+++ b/pkg/mount/flags_linux.go
@@ -1,5 +1,3 @@
-// +build amd64
-
 package mount
 
 import (
diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go
index e598354..5a14108 100644
--- a/pkg/mount/flags_unsupported.go
+++ b/pkg/mount/flags_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!freebsd linux,!amd64 freebsd,!cgo
+// +build !linux,!freebsd freebsd,!cgo
 
 package mount
 
diff --git a/pkg/mount/mounter_linux.go b/pkg/mount/mounter_linux.go
index 70b7798..dd4280c 100644
--- a/pkg/mount/mounter_linux.go
+++ b/pkg/mount/mounter_linux.go
@@ -1,5 +1,3 @@
-// +build amd64
-
 package mount
 
 import (
diff --git a/pkg/mount/mounter_unsupported.go b/pkg/mount/mounter_unsupported.go
index 06f2ac0..eb93365 100644
--- a/pkg/mount/mounter_unsupported.go
+++ b/pkg/mount/mounter_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!freebsd linux,!amd64 freebsd,!cgo
+// +build !linux,!freebsd freebsd,!cgo
 
 package mount
 
diff --git a/pkg/mount/mountinfo_test_linux.go b/pkg/mount/mountinfo_linux_test.go
similarity index 99%
rename from pkg/mount/mountinfo_test_linux.go
rename to pkg/mount/mountinfo_linux_test.go
index f2e3daa..07ef7e0 100644
--- a/pkg/mount/mountinfo_test_linux.go
+++ b/pkg/mount/mountinfo_linux_test.go
@@ -1,3 +1,5 @@
+// +build linux
+
 package mount
 
 import (
diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go
index a89e5b2..ebb5850 100644
--- a/pkg/namesgenerator/names-generator.go
+++ b/pkg/namesgenerator/names-generator.go
@@ -61,6 +61,7 @@
 	// Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. http://en.wikipedia.org/wiki/Rachel_Carson
 	// Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). http://en.wikipedia.org/wiki/Radia_Perlman
 	// Richard Feynman was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman
+	// Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. http://en.wikiquote.org/wiki/Richard_Stallman
 	// Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike
 	// Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - http://en.wikipedia.org/wiki/Rosalind_Franklin
 	// Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - http://en.wikipedia.org/wiki/Sofia_Kovalevskaya
@@ -72,7 +73,7 @@
 	//	http://en.wikipedia.org/wiki/John_Bardeen
 	//	http://en.wikipedia.org/wiki/Walter_Houser_Brattain
 	//	http://en.wikipedia.org/wiki/William_Shockley
-	right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley", "goldstine", "hoover", "hopper", "bartik", "sammet", "jones", "perlman", "wilson", "kowalevski", "hypatia", "goodall", "mayer", "elion", "blackwell", "lalande", "kirch", "ardinghelli", "colden", "almeida", "leakey", "meitner", "mestorf", "rosalind", "sinoussi", "carson", "mcclintock", "yonath"}
+	right = [...]string{"albattani", "almeida", "archimedes", "ardinghelli", "babbage", "bardeen", "bartik", "bell", "blackwell", "bohr", "brattain", "brown", "carson", "colden", "curie", "darwin", "davinci", "einstein", "elion", "engelbart", "euclid", "fermat", "fermi", "feynman", "franklin", "galileo", "goldstine", "goodall", "hawking", "heisenberg", "hoover", "hopper", "hypatia", "jones", "kirch", "kowalevski", "lalande", "leakey", "lovelace", "lumiere", "mayer", "mccarthy", "mcclintock", "mclean", "meitner", "mestorf", "morse", "newton", "nobel", "pare", "pasteur", "perlman", "pike", "poincare", "ptolemy", "ritchie", "rosalind", "sammet", "shockley", "sinoussi", "stallman", "tesla", "thompson", "torvalds", "turing", "wilson", "wozniak", "wright", "yonath"}
 )
 
 func GetRandomName(retry int) string {
diff --git a/pkg/networkfs/resolvconf/resolvconf.go b/pkg/networkfs/resolvconf/resolvconf.go
index d6854fb..38ae564 100644
--- a/pkg/networkfs/resolvconf/resolvconf.go
+++ b/pkg/networkfs/resolvconf/resolvconf.go
@@ -78,8 +78,10 @@
 		}
 	}
 	if len(dnsSearch) > 0 {
-		if _, err := content.WriteString("search " + strings.Join(dnsSearch, " ") + "\n"); err != nil {
-			return err
+		if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." {
+			if _, err := content.WriteString("search " + searchString + "\n"); err != nil {
+				return err
+			}
 		}
 	}
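With this guard, a search path of "." (the new "no search domain" marker accepted by ValidateDnsSearch) produces no search line at all, as the test added below verifies. A standalone sketch (Build's signature is taken from those tests):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/networkfs/resolvconf"
)

func main() {
	path := "/tmp/resolv.conf.sketch"
	if err := resolvconf.Build(path, []string{"8.8.8.8"}, []string{"."}); err != nil {
		fmt.Println(err)
		return
	}
	content, _ := ioutil.ReadFile(path)
	fmt.Print(string(content)) // "nameserver 8.8.8.8" and no "search ." line
}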
 
diff --git a/pkg/networkfs/resolvconf/resolvconf_test.go b/pkg/networkfs/resolvconf/resolvconf_test.go
index fd20712..6187acb 100644
--- a/pkg/networkfs/resolvconf/resolvconf_test.go
+++ b/pkg/networkfs/resolvconf/resolvconf_test.go
@@ -131,3 +131,28 @@
 		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
 	}
 }
+
+func TestBuildWithZeroLengthDomainSearch(t *testing.T) {
+	file, err := ioutil.TempFile("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+
+	err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"."})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	content, err := ioutil.ReadFile(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\n"; !bytes.Contains(content, []byte(expected)) {
+		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+	}
+	if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) {
+		t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content)
+	}
+}
diff --git a/pkg/parsers/MAINTAINERS b/pkg/parsers/MAINTAINERS
new file mode 100644
index 0000000..8c89025
--- /dev/null
+++ b/pkg/parsers/MAINTAINERS
@@ -0,0 +1 @@
+Erik Hollensbe <github@hollensbe.org> (@erikh)
diff --git a/utils/filters/parse.go b/pkg/parsers/filters/parse.go
similarity index 100%
rename from utils/filters/parse.go
rename to pkg/parsers/filters/parse.go
diff --git a/utils/filters/parse_test.go b/pkg/parsers/filters/parse_test.go
similarity index 100%
rename from utils/filters/parse_test.go
rename to pkg/parsers/filters/parse_test.go
diff --git a/pkg/parsers/kernel/kernel.go b/pkg/parsers/kernel/kernel.go
new file mode 100644
index 0000000..70d0900
--- /dev/null
+++ b/pkg/parsers/kernel/kernel.go
@@ -0,0 +1,93 @@
+package kernel
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+type KernelVersionInfo struct {
+	Kernel int
+	Major  int
+	Minor  int
+	Flavor string
+}
+
+func (k *KernelVersionInfo) String() string {
+	return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
+}
+
+// Compare two KernelVersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, 1 if a > b.
+func CompareKernelVersion(a, b *KernelVersionInfo) int {
+	if a.Kernel < b.Kernel {
+		return -1
+	} else if a.Kernel > b.Kernel {
+		return 1
+	}
+
+	if a.Major < b.Major {
+		return -1
+	} else if a.Major > b.Major {
+		return 1
+	}
+
+	if a.Minor < b.Minor {
+		return -1
+	} else if a.Minor > b.Minor {
+		return 1
+	}
+
+	return 0
+}
+
+func GetKernelVersion() (*KernelVersionInfo, error) {
+	var (
+		err error
+	)
+
+	uts, err := uname()
+	if err != nil {
+		return nil, err
+	}
+
+	release := make([]byte, len(uts.Release))
+
+	i := 0
+	for _, c := range uts.Release {
+		release[i] = byte(c)
+		i++
+	}
+
+	// Remove the \x00 from the release for Atoi to parse correctly
+	release = release[:bytes.IndexByte(release, 0)]
+
+	return ParseRelease(string(release))
+}
+
+func ParseRelease(release string) (*KernelVersionInfo, error) {
+	var (
+		kernel, major, minor, parsed int
+		flavor, partial              string
+	)
+
+	// Ignore error from Sscanf to allow an empty flavor.  Instead, just
+	// make sure we got all the version numbers.
+	parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
+	if parsed < 2 {
+		return nil, errors.New("Can't parse kernel version " + release)
+	}
+
+	// sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
+	parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
+	if parsed < 1 {
+		flavor = partial
+	}
+
+	return &KernelVersionInfo{
+		Kernel: kernel,
+		Major:  major,
+		Minor:  minor,
+		Flavor: flavor,
+	}, nil
+}
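A sketch of the parse/compare pair, e.g. for a minimum-kernel check (values mirror the tests below):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/kernel"
)

func main() {
	v, err := kernel.ParseRelease("3.12-1-amd64") // minor is optional; the rest becomes Flavor
	if err != nil {
		fmt.Println(err)
		return
	}
	minimum := &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}
	if kernel.CompareKernelVersion(v, minimum) < 0 {
		fmt.Printf("kernel %s is too old\n", v)
	} else {
		fmt.Printf("kernel %s is recent enough\n", v)
	}
}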
diff --git a/pkg/parsers/kernel/kernel_test.go b/pkg/parsers/kernel/kernel_test.go
new file mode 100644
index 0000000..e211a63
--- /dev/null
+++ b/pkg/parsers/kernel/kernel_test.go
@@ -0,0 +1,61 @@
+package kernel
+
+import (
+	"testing"
+)
+
+func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) {
+	var (
+		a *KernelVersionInfo
+	)
+	a, _ = ParseRelease(release)
+
+	if r := CompareKernelVersion(a, b); r != result {
+		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
+	}
+	if a.Flavor != b.Flavor {
+		t.Fatalf("Unexpected parsed kernel flavor.  Found %s, expected %s", a.Flavor, b.Flavor)
+	}
+}
+
+func TestParseRelease(t *testing.T) {
+	assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
+	assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
+	assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
+	assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
+	assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
+	assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
+}
+
+func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
+	if r := CompareKernelVersion(a, b); r != result {
+		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
+	}
+}
+
+func TestCompareKernelVersion(t *testing.T) {
+	assertKernelVersion(t,
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		0)
+	assertKernelVersion(t,
+		&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		-1)
+	assertKernelVersion(t,
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
+		1)
+	assertKernelVersion(t,
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		0)
+	assertKernelVersion(t,
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5},
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		1)
+	assertKernelVersion(t,
+		&KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20},
+		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+		-1)
+}
diff --git a/utils/uname_linux.go b/pkg/parsers/kernel/uname_linux.go
similarity index 86%
rename from utils/uname_linux.go
rename to pkg/parsers/kernel/uname_linux.go
index 2f4afb4..8ca814c 100644
--- a/utils/uname_linux.go
+++ b/pkg/parsers/kernel/uname_linux.go
@@ -1,6 +1,4 @@
-// +build amd64
-
-package utils
+package kernel
 
 import (
 	"syscall"
diff --git a/utils/uname_unsupported.go b/pkg/parsers/kernel/uname_unsupported.go
similarity index 82%
rename from utils/uname_unsupported.go
rename to pkg/parsers/kernel/uname_unsupported.go
index 57b82ec..00c5422 100644
--- a/utils/uname_unsupported.go
+++ b/pkg/parsers/kernel/uname_unsupported.go
@@ -1,6 +1,6 @@
-// +build !linux !amd64
+// +build !linux
 
-package utils
+package kernel
 
 import (
 	"errors"
diff --git a/pkg/parsers/operatingsystem/operatingsystem.go b/pkg/parsers/operatingsystem/operatingsystem.go
new file mode 100644
index 0000000..af185f9
--- /dev/null
+++ b/pkg/parsers/operatingsystem/operatingsystem.go
@@ -0,0 +1,40 @@
+package operatingsystem
+
+import (
+	"bytes"
+	"errors"
+	"io/ioutil"
+)
+
+var (
+	// file to use to detect if the daemon is running in a container
+	proc1Cgroup = "/proc/1/cgroup"
+
+	// file to check to determine Operating System
+	etcOsRelease = "/etc/os-release"
+)
+
+func GetOperatingSystem() (string, error) {
+	b, err := ioutil.ReadFile(etcOsRelease)
+	if err != nil {
+		return "", err
+	}
+	if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 {
+		b = b[i+13:] // skip the `PRETTY_NAME="` prefix
+		return string(b[:bytes.IndexByte(b, '"')]), nil
+	}
+	return "", errors.New("PRETTY_NAME not found")
+}
+
+func IsContainerized() (bool, error) {
+	b, err := ioutil.ReadFile(proc1Cgroup)
+	if err != nil {
+		return false, err
+	}
+	for _, line := range bytes.Split(b, []byte{'\n'}) {
+		if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) {
+			return true, nil
+		}
+	}
+	return false, nil
+}
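Both helpers read world-readable files under /etc and /proc, so a usage sketch is straightforward (output depends on the host):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/operatingsystem"
)

func main() {
	if name, err := operatingsystem.GetOperatingSystem(); err == nil {
		fmt.Println("host:", name) // PRETTY_NAME from /etc/os-release, e.g. "Ubuntu 14.04 LTS"
	}
	if contained, err := operatingsystem.IsContainerized(); err == nil {
		fmt.Println("containerized:", contained) // true when /proc/1/cgroup has non-root entries
	}
}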
diff --git a/pkg/parsers/operatingsystem/operatingsystem_test.go b/pkg/parsers/operatingsystem/operatingsystem_test.go
new file mode 100644
index 0000000..d264b35
--- /dev/null
+++ b/pkg/parsers/operatingsystem/operatingsystem_test.go
@@ -0,0 +1,123 @@
+package operatingsystem
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestGetOperatingSystem(t *testing.T) {
+	var (
+		backup       = etcOsRelease
+		ubuntuTrusty = []byte(`NAME="Ubuntu"
+VERSION="14.04, Trusty Tahr"
+ID=ubuntu
+ID_LIKE=debian
+PRETTY_NAME="Ubuntu 14.04 LTS"
+VERSION_ID="14.04"
+HOME_URL="http://www.ubuntu.com/"
+SUPPORT_URL="http://help.ubuntu.com/"
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`)
+		gentoo = []byte(`NAME=Gentoo
+ID=gentoo
+PRETTY_NAME="Gentoo/Linux"
+ANSI_COLOR="1;32"
+HOME_URL="http://www.gentoo.org/"
+SUPPORT_URL="http://www.gentoo.org/main/en/support.xml"
+BUG_REPORT_URL="https://bugs.gentoo.org/"
+`)
+		noPrettyName = []byte(`NAME="Ubuntu"
+VERSION="14.04, Trusty Tahr"
+ID=ubuntu
+ID_LIKE=debian
+VERSION_ID="14.04"
+HOME_URL="http://www.ubuntu.com/"
+SUPPORT_URL="http://help.ubuntu.com/"
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`)
+	)
+
+	dir := os.TempDir()
+	defer func() {
+		etcOsRelease = backup
+		os.RemoveAll(dir)
+	}()
+
+	etcOsRelease = filepath.Join(dir, "etcOsRelease")
+	for expect, osRelease := range map[string][]byte{
+		"Ubuntu 14.04 LTS": ubuntuTrusty,
+		"Gentoo/Linux":     gentoo,
+		"":                 noPrettyName,
+	} {
+		if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil {
+			t.Fatalf("failed to write to %s: %v", etcOsRelease, err)
+		}
+		s, err := GetOperatingSystem()
+		if s != expect {
+			if expect == "" {
+				t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err)
+			} else {
+				t.Fatalf("Expected '%s', but got '%s'. Err=%v", expect, s, err)
+			}
+		}
+	}
+}
+
+func TestIsContainerized(t *testing.T) {
+	var (
+		backup                      = proc1Cgroup
+		nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/
+13:hugetlb:/
+12:net_prio:/
+11:perf_event:/
+10:bfqio:/
+9:blkio:/
+8:net_cls:/
+7:freezer:/
+6:devices:/
+5:memory:/
+4:cpuacct:/
+3:cpu:/
+2:cpuset:/
+`)
+		containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+7:net_cls:/
+6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+1:cpuset:/`)
+	)
+
+	dir := os.TempDir()
+	defer func() {
+		proc1Cgroup = backup
+		os.RemoveAll(dir)
+	}()
+
+	proc1Cgroup = filepath.Join(dir, "proc1Cgroup")
+
+	if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil {
+		t.Fatalf("failed to write to %s: %v", proc1Cgroup, err)
+	}
+	inContainer, err := IsContainerized()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if inContainer {
+		t.Fatal("Wrongly assuming containerized")
+	}
+
+	if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil {
+		t.Fatalf("failed to write to %s: %v", proc1Cgroup, err)
+	}
+	inContainer, err = IsContainerized()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !inContainer {
+		t.Fatal("Wrongly assuming non-containerized")
+	}
+}
diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go
new file mode 100644
index 0000000..e6e3718
--- /dev/null
+++ b/pkg/parsers/parsers.go
@@ -0,0 +1,110 @@
+package parsers
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// FIXME: Change this not to receive default value as parameter
+func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) {
+	var (
+		proto string
+		host  string
+		port  int
+	)
+	addr = strings.TrimSpace(addr)
+	switch {
+	case addr == "tcp://":
+		return "", fmt.Errorf("Invalid bind address format: %s", addr)
+	case strings.HasPrefix(addr, "unix://"):
+		proto = "unix"
+		addr = strings.TrimPrefix(addr, "unix://")
+		if addr == "" {
+			addr = defaultUnix
+		}
+	case strings.HasPrefix(addr, "tcp://"):
+		proto = "tcp"
+		addr = strings.TrimPrefix(addr, "tcp://")
+	case strings.HasPrefix(addr, "fd://"):
+		return addr, nil
+	case addr == "":
+		proto = "unix"
+		addr = defaultUnix
+	default:
+		if strings.Contains(addr, "://") {
+			return "", fmt.Errorf("Invalid bind address protocol: %s", addr)
+		}
+		proto = "tcp"
+	}
+
+	if proto != "unix" && strings.Contains(addr, ":") {
+		hostParts := strings.Split(addr, ":")
+		if len(hostParts) != 2 {
+			return "", fmt.Errorf("Invalid bind address format: %s", addr)
+		}
+		if hostParts[0] != "" {
+			host = hostParts[0]
+		} else {
+			host = defaultHost
+		}
+
+		if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 {
+			port = p
+		} else {
+			return "", fmt.Errorf("Invalid bind address format: %s", addr)
+		}
+
+	} else if proto == "tcp" && !strings.Contains(addr, ":") {
+		return "", fmt.Errorf("Invalid bind address format: %s", addr)
+	} else {
+		host = addr
+	}
+	if proto == "unix" {
+		return fmt.Sprintf("%s://%s", proto, host), nil
+	}
+	return fmt.Sprintf("%s://%s:%d", proto, host, port), nil
+}
+
+// ParseRepositoryTag gets a repository name and returns the repository name and tag.
+// The tag can be confusing because of a port in a repository name.
+//     Ex: localhost.localdomain:5000/samalba/hipache:latest
+func ParseRepositoryTag(repos string) (string, string) {
+	n := strings.LastIndex(repos, ":")
+	if n < 0 {
+		return repos, ""
+	}
+	if tag := repos[n+1:]; !strings.Contains(tag, "/") {
+		return repos[:n], tag
+	}
+	return repos, ""
+}
+
+func PartParser(template, data string) (map[string]string, error) {
+	// ip:public:private
+	var (
+		templateParts = strings.Split(template, ":")
+		parts         = strings.Split(data, ":")
+		out           = make(map[string]string, len(templateParts))
+	)
+	if len(parts) != len(templateParts) {
+		return nil, fmt.Errorf("Invalid format to parse.  %s should match template %s", data, template)
+	}
+
+	for i, t := range templateParts {
+		value := ""
+		if len(parts) > i {
+			value = parts[i]
+		}
+		out[t] = value
+	}
+	return out, nil
+}
+
+func ParseKeyValueOpt(opt string) (string, string, error) {
+	parts := strings.SplitN(opt, "=", 2)
+	if len(parts) != 2 {
+		return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+	}
+	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+}
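ParseHost normalizes the various -H address forms; a sketch of the accepted inputs (mirroring the tests that follow):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers"
)

func main() {
	for _, in := range []string{"", "unix://", "tcp://:2375", "0.0.0.1:5555", "0.0.0.0"} {
		addr, err := parsers.ParseHost("127.0.0.1", "/var/run/docker.sock", in)
		if err != nil {
			fmt.Printf("%q: %v\n", in, err) // e.g. a bare IP with no port is rejected
			continue
		}
		fmt.Printf("%q -> %s\n", in, addr)
	}
}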
diff --git a/pkg/parsers/parsers_test.go b/pkg/parsers/parsers_test.go
new file mode 100644
index 0000000..12b8df5
--- /dev/null
+++ b/pkg/parsers/parsers_test.go
@@ -0,0 +1,83 @@
+package parsers
+
+import (
+	"testing"
+)
+
+func TestParseHost(t *testing.T) {
+	var (
+		defaultHttpHost = "127.0.0.1"
+		defaultUnix     = "/var/run/docker.sock"
+	)
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil {
+		t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil {
+		t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" {
+		t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" {
+		t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" {
+		t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" {
+		t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" {
+		t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" {
+		t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil {
+		t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr)
+	}
+	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil {
+		t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr)
+	}
+}
+
+func TestParseRepositoryTag(t *testing.T) {
+	if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" {
+		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag)
+	}
+	if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" {
+		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag)
+	}
+	if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" {
+		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag)
+	}
+	if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" {
+		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag)
+	}
+	if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" {
+		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag)
+	}
+	if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" {
+		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag)
+	}
+}
+
+func TestParsePortMapping(t *testing.T) {
+	data, err := PartParser("ip:public:private", "192.168.1.1:80:8080")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(data) != 3 {
+		t.FailNow()
+	}
+	if data["ip"] != "192.168.1.1" {
+		t.Fail()
+	}
+	if data["public"] != "80" {
+		t.Fail()
+	}
+	if data["private"] != "8080" {
+		t.Fail()
+	}
+}
diff --git a/pkg/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go
index 14f2306..ae6a7bb 100644
--- a/pkg/proxy/udp_proxy.go
+++ b/pkg/proxy/udp_proxy.go
@@ -116,6 +116,7 @@
 			proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr)
 			if err != nil {
 				log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
+				proxy.connTrackLock.Unlock()
 				continue
 			}
 			proxy.connTrackTable[*fromKey] = proxyConn
diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go
new file mode 100644
index 0000000..cbdfd1f
--- /dev/null
+++ b/pkg/signal/trap.go
@@ -0,0 +1,54 @@
+package signal
+
+import (
+	"log"
+	"os"
+	gosignal "os/signal"
+	"sync/atomic"
+	"syscall"
+)
+
+// Trap sets up a simplified signal "trap", appropriate for common
+// behavior expected from a vanilla unix command-line tool in general
+// (and the Docker engine in particular).
+//
+// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
+// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is
+// skipped and the process is terminated directly.
+// * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup.
+//
+func Trap(cleanup func()) {
+	c := make(chan os.Signal, 1)
+	signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
+	if os.Getenv("DEBUG") == "" {
+		signals = append(signals, syscall.SIGQUIT)
+	}
+	gosignal.Notify(c, signals...)
+	go func() {
+		interruptCount := uint32(0)
+		for sig := range c {
+			go func(sig os.Signal) {
+				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
+				switch sig {
+				case os.Interrupt, syscall.SIGTERM:
+					// If the user really wants to interrupt, let him do so.
+					if atomic.LoadUint32(&interruptCount) < 3 {
+						atomic.AddUint32(&interruptCount, 1)
+						// Initiate the cleanup only once
+						if atomic.LoadUint32(&interruptCount) == 1 {
+							// Call cleanup handler
+							cleanup()
+							os.Exit(0)
+						} else {
+							return
+						}
+					} else {
+						log.Printf("Force shutdown of docker, interrupting cleanup\n")
+					}
+				case syscall.SIGQUIT:
+				}
+				os.Exit(128 + int(sig.(syscall.Signal)))
+			}(sig)
+		}
+	}()
+}
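Typical use from a daemon's main function (a standalone sketch; the sleep stands in for the real serve loop):

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	signal.Trap(func() {
		// Runs once on the first SIGINT/SIGTERM; three rapid signals
		// skip cleanup and exit immediately.
		fmt.Println("shutting down cleanly")
	})
	time.Sleep(time.Hour) // stand-in for the daemon's main loop
}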
diff --git a/pkg/system/calls_linux.go b/pkg/system/calls_linux.go
deleted file mode 100644
index 125fd25..0000000
--- a/pkg/system/calls_linux.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package system
-
-import (
-	"os/exec"
-	"syscall"
-	"unsafe"
-)
-
-func Chroot(dir string) error {
-	return syscall.Chroot(dir)
-}
-
-func Chdir(dir string) error {
-	return syscall.Chdir(dir)
-}
-
-func Exec(cmd string, args []string, env []string) error {
-	return syscall.Exec(cmd, args, env)
-}
-
-func Execv(cmd string, args []string, env []string) error {
-	name, err := exec.LookPath(cmd)
-	if err != nil {
-		return err
-	}
-	return Exec(name, args, env)
-}
-
-func Fork() (int, error) {
-	syscall.ForkLock.Lock()
-	pid, _, err := syscall.Syscall(syscall.SYS_FORK, 0, 0, 0)
-	syscall.ForkLock.Unlock()
-	if err != 0 {
-		return -1, err
-	}
-	return int(pid), nil
-}
-
-func Mount(source, target, fstype string, flags uintptr, data string) error {
-	return syscall.Mount(source, target, fstype, flags, data)
-}
-
-func Unmount(target string, flags int) error {
-	return syscall.Unmount(target, flags)
-}
-
-func Pivotroot(newroot, putold string) error {
-	return syscall.PivotRoot(newroot, putold)
-}
-
-func Unshare(flags int) error {
-	return syscall.Unshare(flags)
-}
-
-func Clone(flags uintptr) (int, error) {
-	syscall.ForkLock.Lock()
-	pid, _, err := syscall.RawSyscall(syscall.SYS_CLONE, flags, 0, 0)
-	syscall.ForkLock.Unlock()
-	if err != 0 {
-		return -1, err
-	}
-	return int(pid), nil
-}
-
-func UsetCloseOnExec(fd uintptr) error {
-	if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_SETFD, 0); err != 0 {
-		return err
-	}
-	return nil
-}
-
-func Setgroups(gids []int) error {
-	return syscall.Setgroups(gids)
-}
-
-func Setresgid(rgid, egid, sgid int) error {
-	return syscall.Setresgid(rgid, egid, sgid)
-}
-
-func Setresuid(ruid, euid, suid int) error {
-	return syscall.Setresuid(ruid, euid, suid)
-}
-
-func Setgid(gid int) error {
-	return syscall.Setgid(gid)
-}
-
-func Setuid(uid int) error {
-	return syscall.Setuid(uid)
-}
-
-func Sethostname(name string) error {
-	return syscall.Sethostname([]byte(name))
-}
-
-func Setsid() (int, error) {
-	return syscall.Setsid()
-}
-
-func Ioctl(fd uintptr, flag, data uintptr) error {
-	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 {
-		return err
-	}
-	return nil
-}
-
-func Closefd(fd uintptr) error {
-	return syscall.Close(int(fd))
-}
-
-func Dup2(fd1, fd2 uintptr) error {
-	return syscall.Dup2(int(fd1), int(fd2))
-}
-
-func Mknod(path string, mode uint32, dev int) error {
-	return syscall.Mknod(path, mode, dev)
-}
-
-func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) error {
-	if _, _, err := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0); err != 0 {
-		return err
-	}
-	return nil
-}
-
-func ParentDeathSignal(sig uintptr) error {
-	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 {
-		return err
-	}
-	return nil
-}
-
-func GetParentDeathSignal() (int, error) {
-	var sig int
-
-	_, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0)
-
-	if err != 0 {
-		return -1, err
-	}
-
-	return sig, nil
-}
-
-func SetKeepCaps() error {
-	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 {
-		return err
-	}
-
-	return nil
-}
-
-func ClearKeepCaps() error {
-	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 {
-		return err
-	}
-
-	return nil
-}
-
-func Setctty() error {
-	if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 {
-		return err
-	}
-	return nil
-}
-
-func Mkfifo(name string, mode uint32) error {
-	return syscall.Mkfifo(name, mode)
-}
-
-func Umask(mask int) int {
-	return syscall.Umask(mask)
-}
-
-func SetCloneFlags(cmd *exec.Cmd, flag uintptr) {
-	if cmd.SysProcAttr == nil {
-		cmd.SysProcAttr = &syscall.SysProcAttr{}
-	}
-	cmd.SysProcAttr.Cloneflags = flag
-}
-
-func Gettid() int {
-	return syscall.Gettid()
-}
diff --git a/pkg/system/fds_linux.go b/pkg/system/fds_linux.go
deleted file mode 100644
index 53d2299..0000000
--- a/pkg/system/fds_linux.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package system
-
-import (
-	"io/ioutil"
-	"strconv"
-	"syscall"
-)
-
-// Works similarly to OpenBSD's "closefrom(2)":
-//   The closefrom() call deletes all descriptors numbered fd and higher from
-//   the per-process file descriptor table.  It is effectively the same as
-//   calling close(2) on each descriptor.
-// http://www.openbsd.org/cgi-bin/man.cgi?query=closefrom&sektion=2
-//
-// See also http://stackoverflow.com/a/918469/433558
-func CloseFdsFrom(minFd int) error {
-	fdList, err := ioutil.ReadDir("/proc/self/fd")
-	if err != nil {
-		return err
-	}
-	for _, fi := range fdList {
-		fd, err := strconv.Atoi(fi.Name())
-		if err != nil {
-			// ignore non-numeric file names
-			continue
-		}
-
-		if fd < minFd {
-			// ignore descriptors lower than our specified minimum
-			continue
-		}
-
-		// intentionally ignore errors from syscall.Close
-		syscall.Close(fd)
-		// the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall)
-	}
-	return nil
-}
diff --git a/pkg/system/fds_unsupported.go b/pkg/system/fds_unsupported.go
deleted file mode 100644
index c1e08e8..0000000
--- a/pkg/system/fds_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux
-
-package system
-
-import (
-	"fmt"
-	"runtime"
-)
-
-func CloseFdsFrom(minFd int) error {
-	return fmt.Errorf("CloseFdsFrom is unsupported on this platform (%s/%s)", runtime.GOOS, runtime.GOARCH)
-}
diff --git a/pkg/system/pty_linux.go b/pkg/system/pty_linux.go
deleted file mode 100644
index ca588d8..0000000
--- a/pkg/system/pty_linux.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package system
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-	"unsafe"
-)
-
-// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
-// Unlockpt should be called before opening the slave side of a pseudoterminal.
-func Unlockpt(f *os.File) error {
-	var u int
-	return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
-}
-
-// Ptsname retrieves the name of the first available pts for the given master.
-func Ptsname(f *os.File) (string, error) {
-	var n int
-
-	if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
-		return "", err
-	}
-	return fmt.Sprintf("/dev/pts/%d", n), nil
-}
-
-// CreateMasterAndConsole will open /dev/ptmx on the host and retreive the
-// pts name for use as the pty slave inside the container
-func CreateMasterAndConsole() (*os.File, string, error) {
-	master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
-	if err != nil {
-		return nil, "", err
-	}
-	console, err := Ptsname(master)
-	if err != nil {
-		return nil, "", err
-	}
-	if err := Unlockpt(master); err != nil {
-		return nil, "", err
-	}
-	return master, console, nil
-}
-
-// OpenPtmx opens /dev/ptmx, i.e. the PTY master.
-func OpenPtmx() (*os.File, error) {
-	// O_NOCTTY and O_CLOEXEC are not present in os package so we use the syscall's one for all.
-	return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
-}
-
-// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC
-// used to open the pty slave inside the container namespace
-func OpenTerminal(name string, flag int) (*os.File, error) {
-	r, e := syscall.Open(name, flag, 0)
-	if e != nil {
-		return nil, &os.PathError{"open", name, e}
-	}
-	return os.NewFile(uintptr(r), name), nil
-}
diff --git a/pkg/system/sysconfig.go b/pkg/system/sysconfig.go
deleted file mode 100644
index dcbe6c9..0000000
--- a/pkg/system/sysconfig.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build linux,cgo
-
-package system
-
-/*
-#include <unistd.h>
-int get_hz(void) { return sysconf(_SC_CLK_TCK); }
-*/
-import "C"
-
-func GetClockTicks() int {
-	return int(C.get_hz())
-}
diff --git a/pkg/system/sysconfig_nocgo.go b/pkg/system/sysconfig_nocgo.go
deleted file mode 100644
index 7ca3488..0000000
--- a/pkg/system/sysconfig_nocgo.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build linux,!cgo
-
-package system
-
-func GetClockTicks() int {
-	// when we cannot call out to C to get the sysconf it is fairly safe to
-	// just return 100
-	return 100
-}
diff --git a/pkg/system/unsupported.go b/pkg/system/unsupported.go
deleted file mode 100644
index aea4b69..0000000
--- a/pkg/system/unsupported.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build !linux
-
-package system
-
-import (
-	"os"
-	"os/exec"
-)
-
-func SetCloneFlags(cmd *exec.Cmd, flag uintptr) {
-
-}
-
-func UsetCloseOnExec(fd uintptr) error {
-	return ErrNotSupportedPlatform
-}
-
-func Gettid() int {
-	return 0
-}
-
-func GetClockTicks() int {
-	// when we cannot call out to C to get the sysconf it is fairly safe to
-	// just return 100
-	return 100
-}
-
-func CreateMasterAndConsole() (*os.File, string, error) {
-	return nil, "", ErrNotSupportedPlatform
-}
-
-func SetKeepCaps() error {
-	return ErrNotSupportedPlatform
-}
-
-func ClearKeepCaps() error {
-	return ErrNotSupportedPlatform
-}
diff --git a/utils/tarsum.go b/pkg/tarsum/tarsum.go
similarity index 90%
rename from utils/tarsum.go
rename to pkg/tarsum/tarsum.go
index 67e94aa..05b9ef4 100644
--- a/utils/tarsum.go
+++ b/pkg/tarsum/tarsum.go
@@ -1,16 +1,19 @@
-package utils
+package tarsum
 
 import (
 	"bytes"
 	"compress/gzip"
 	"crypto/sha256"
 	"encoding/hex"
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 	"hash"
 	"io"
 	"sort"
 	"strconv"
 	"strings"
+
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+	"github.com/docker/docker/pkg/log"
 )
 
 type TarSum struct {
@@ -20,6 +23,7 @@
 	gz                 writeCloseFlusher
 	bufTar             *bytes.Buffer
 	bufGz              *bytes.Buffer
+	bufData            [8192]byte
 	h                  hash.Hash
 	sums               map[string]string
 	currentFile        string
@@ -89,7 +93,12 @@
 	if ts.finished {
 		return ts.bufGz.Read(buf)
 	}
-	buf2 := make([]byte, len(buf), cap(buf))
+	var buf2 []byte
+	if len(buf) > 8192 {
+		buf2 = make([]byte, len(buf), cap(buf))
+	} else {
+		buf2 = ts.bufData[:len(buf)]
+	}
 
 	n, err := ts.tarR.Read(buf2)
 	if err != nil {
@@ -168,11 +177,11 @@
 		h.Write(extra)
 	}
 	for _, sum := range sums {
-		Debugf("-->%s<--", sum)
+		log.Infof("-->%s<--", sum)
 		h.Write([]byte(sum))
 	}
 	checksum := "tarsum+sha256:" + hex.EncodeToString(h.Sum(nil))
-	Debugf("checksum processed: %s", checksum)
+	log.Infof("checksum processed: %s", checksum)
 	return checksum
 }
 
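
The `bufData` field added above is an allocation-avoidance pattern: reads of up to 8 KiB reuse a fixed scratch array instead of allocating a fresh slice on every `Read`. A standalone sketch of the same idea, with illustrative names:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader reuses a fixed scratch array across Read calls,
// mirroring the bufData optimization (names are illustrative).
type countingReader struct {
	r       io.Reader
	scratch [8192]byte
	total   int64
}

func (c *countingReader) Read(buf []byte) (int, error) {
	tmp := c.scratch[:]
	if len(buf) < len(tmp) {
		tmp = tmp[:len(buf)] // never read more than the caller asked for
	}
	n, err := c.r.Read(tmp)
	copy(buf, tmp[:n])
	c.total += int64(n)
	return n, err
}

func main() {
	cr := &countingReader{r: strings.NewReader("hello")}
	buf := make([]byte, 3)
	n, _ := cr.Read(buf)
	fmt.Println(n, string(buf[:n]), cr.total) // 3 hel 3
}
```
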
diff --git a/utils/tarsum_test.go b/pkg/tarsum/tarsum_test.go
similarity index 97%
rename from utils/tarsum_test.go
rename to pkg/tarsum/tarsum_test.go
index e84abde..f251f09 100644
--- a/utils/tarsum_test.go
+++ b/pkg/tarsum/tarsum_test.go
@@ -1,4 +1,4 @@
-package utils
+package tarsum
 
 import (
 	"bytes"
@@ -9,7 +9,7 @@
 	"os"
 	"testing"
 
-	"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 type testLayer struct {
diff --git a/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
similarity index 100%
rename from utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
rename to pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
diff --git a/utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar
similarity index 100%
rename from utils/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar
rename to pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar
Binary files differ
diff --git a/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json b/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
similarity index 100%
rename from utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
rename to pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
diff --git a/utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar b/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar
similarity index 100%
rename from utils/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar
rename to pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar
Binary files differ
diff --git a/pkg/term/MAINTAINERS b/pkg/term/MAINTAINERS
index 1887dfc..aee10c8 100644
--- a/pkg/term/MAINTAINERS
+++ b/pkg/term/MAINTAINERS
@@ -1 +1 @@
-Solomon Hykes <solomon@dotcloud.com> (@shykes)
+Solomon Hykes <solomon@docker.com> (@shykes)
diff --git a/pkg/testutils/testutils.go b/pkg/testutils/utils.go
similarity index 100%
rename from pkg/testutils/testutils.go
rename to pkg/testutils/utils.go
diff --git a/pkg/truncindex/MAINTAINERS b/pkg/truncindex/MAINTAINERS
new file mode 100644
index 0000000..6dde476
--- /dev/null
+++ b/pkg/truncindex/MAINTAINERS
@@ -0,0 +1 @@
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go
index f88d667..32c41c7 100644
--- a/pkg/truncindex/truncindex_test.go
+++ b/pkg/truncindex/truncindex_test.go
@@ -4,7 +4,7 @@
 	"math/rand"
 	"testing"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/utils"
 )
 
 // Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
diff --git a/pkg/units/duration_test.go b/pkg/units/duration_test.go
new file mode 100644
index 0000000..a229474
--- /dev/null
+++ b/pkg/units/duration_test.go
@@ -0,0 +1,46 @@
+package units
+
+import (
+	"testing"
+	"time"
+)
+
+func TestHumanDuration(t *testing.T) {
+	// Useful duration abstractions
+	day := 24 * time.Hour
+	week := 7 * day
+	month := 30 * day
+	year := 365 * day
+
+	assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond))
+	assertEquals(t, "47 seconds", HumanDuration(47*time.Second))
+	assertEquals(t, "About a minute", HumanDuration(1*time.Minute))
+	assertEquals(t, "3 minutes", HumanDuration(3*time.Minute))
+	assertEquals(t, "35 minutes", HumanDuration(35*time.Minute))
+	assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second))
+	assertEquals(t, "About an hour", HumanDuration(1*time.Hour))
+	assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute))
+	assertEquals(t, "3 hours", HumanDuration(3*time.Hour))
+	assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute))
+	assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute))
+	assertEquals(t, "24 hours", HumanDuration(24*time.Hour))
+	assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour))
+	assertEquals(t, "2 days", HumanDuration(2*day))
+	assertEquals(t, "7 days", HumanDuration(7*day))
+	assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour))
+	assertEquals(t, "2 weeks", HumanDuration(2*week))
+	assertEquals(t, "2 weeks", HumanDuration(2*week+4*day))
+	assertEquals(t, "3 weeks", HumanDuration(3*week))
+	assertEquals(t, "4 weeks", HumanDuration(4*week))
+	assertEquals(t, "4 weeks", HumanDuration(4*week+3*day))
+	assertEquals(t, "4 weeks", HumanDuration(1*month))
+	assertEquals(t, "6 weeks", HumanDuration(1*month+2*week))
+	assertEquals(t, "8 weeks", HumanDuration(2*month))
+	assertEquals(t, "3 months", HumanDuration(3*month+1*week))
+	assertEquals(t, "5 months", HumanDuration(5*month+2*week))
+	assertEquals(t, "13 months", HumanDuration(13*month))
+	assertEquals(t, "23 months", HumanDuration(23*month))
+	assertEquals(t, "24 months", HumanDuration(24*month))
+	assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week))
+	assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month))
+}
diff --git a/pkg/units/size.go b/pkg/units/size.go
index 480ec2f..88d91dd 100644
--- a/pkg/units/size.go
+++ b/pkg/units/size.go
@@ -7,86 +7,80 @@
 	"strings"
 )
 
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+	// Decimal
+	KB = 1000
+	MB = 1000 * KB
+	GB = 1000 * MB
+	TB = 1000 * GB
+	PB = 1000 * TB
+
+	// Binary
+	KiB = 1024
+	MiB = 1024 * KiB
+	GiB = 1024 * MiB
+	TiB = 1024 * GiB
+	PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+	decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+	binaryMap  = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+)
+
+var sizeRegex *regexp.Regexp
+
+func init() {
+	sizeRegex = regexp.MustCompile("^(\\d+)([kKmMgGtTpP])?[bB]?$")
+}
+
+var unitAbbrs = [...]string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+
 // HumanSize returns a human-readable approximation of a size
 // using SI standard (eg. "44kB", "17MB")
 func HumanSize(size int64) string {
 	i := 0
-	var sizef float64
-	sizef = float64(size)
-	units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+	sizef := float64(size)
 	for sizef >= 1000.0 {
 		sizef = sizef / 1000.0
 		i++
 	}
-	return fmt.Sprintf("%.4g %s", sizef, units[i])
+	return fmt.Sprintf("%.4g %s", sizef, unitAbbrs[i])
 }
 
-// FromHumanSize returns an integer from a human-readable specification of a size
-// using SI standard (eg. "44kB", "17MB")
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (e.g. "44kB", "17MB")
 func FromHumanSize(size string) (int64, error) {
-	re, error := regexp.Compile("^(\\d+)([kKmMgGtTpP])?[bB]?$")
-	if error != nil {
-		return -1, fmt.Errorf("%s does not specify not a size", size)
-	}
-
-	matches := re.FindStringSubmatch(size)
-
-	if len(matches) != 3 {
-		return -1, fmt.Errorf("Invalid size: '%s'", size)
-	}
-
-	theSize, error := strconv.ParseInt(matches[1], 10, 0)
-	if error != nil {
-		return -1, error
-	}
-
-	unit := strings.ToLower(matches[2])
-
-	if unit == "k" {
-		theSize *= 1000
-	} else if unit == "m" {
-		theSize *= 1000 * 1000
-	} else if unit == "g" {
-		theSize *= 1000 * 1000 * 1000
-	} else if unit == "t" {
-		theSize *= 1000 * 1000 * 1000 * 1000
-	} else if unit == "p" {
-		theSize *= 1000 * 1000 * 1000 * 1000 * 1000
-	}
-
-	return theSize, nil
+	return parseSize(size, decimalMap)
 }
 
 // Parses a human-readable string representing an amount of RAM
-// in bytes, kibibytes, mebibytes or gibibytes, and returns the
-// number of bytes, or -1 if the string is unparseable.
+// in bytes, kibibytes, mebibytes, gibibytes, tebibytes, or pebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
 // Units are case-insensitive, and the 'b' suffix is optional.
-func RAMInBytes(size string) (bytes int64, err error) {
-	re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$")
-	if error != nil {
-		return -1, error
-	}
+func RAMInBytes(size string) (int64, error) {
+	return parseSize(size, binaryMap)
+}
 
-	matches := re.FindStringSubmatch(size)
-
+// parseSize parses the human-readable size string into the amount it represents
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+	matches := sizeRegex.FindStringSubmatch(sizeStr)
 	if len(matches) != 3 {
-		return -1, fmt.Errorf("Invalid size: '%s'", size)
+		return -1, fmt.Errorf("Invalid size: '%s'", sizeStr)
 	}
 
-	memLimit, error := strconv.ParseInt(matches[1], 10, 0)
-	if error != nil {
-		return -1, error
+	size, err := strconv.ParseInt(matches[1], 10, 0)
+	if err != nil {
+		return -1, err
 	}
 
-	unit := strings.ToLower(matches[2])
-
-	if unit == "k" {
-		memLimit *= 1024
-	} else if unit == "m" {
-		memLimit *= 1024 * 1024
-	} else if unit == "g" {
-		memLimit *= 1024 * 1024 * 1024
+	unitPrefix := strings.ToLower(matches[2])
+	if mul, ok := uMap[unitPrefix]; ok {
+		size *= mul
 	}
 
-	return memLimit, nil
+	return size, nil
 }
diff --git a/pkg/units/size_test.go b/pkg/units/size_test.go
index 5240bbd..8dae7e7 100644
--- a/pkg/units/size_test.go
+++ b/pkg/units/size_test.go
@@ -1,89 +1,98 @@
 package units
 
 import (
+	"reflect"
+	"runtime"
 	"strings"
 	"testing"
 )
 
 func TestHumanSize(t *testing.T) {
-
-	size := strings.Trim(HumanSize(1000), " \t")
-	expect := "1 kB"
-	if size != expect {
-		t.Errorf("1000 -> expected '%s', got '%s'", expect, size)
-	}
-
-	size = strings.Trim(HumanSize(1024), " \t")
-	expect = "1.024 kB"
-	if size != expect {
-		t.Errorf("1024 -> expected '%s', got '%s'", expect, size)
-	}
+	assertEquals(t, "1 kB", HumanSize(1000))
+	assertEquals(t, "1.024 kB", HumanSize(1024))
+	assertEquals(t, "1 MB", HumanSize(1000000))
+	assertEquals(t, "1.049 MB", HumanSize(1048576))
+	assertEquals(t, "2 MB", HumanSize(2*MB))
+	assertEquals(t, "3.42 GB", HumanSize(3.42*GB))
+	assertEquals(t, "5.372 TB", HumanSize(5.372*TB))
+	assertEquals(t, "2.22 PB", HumanSize(2.22*PB))
 }
 
 func TestFromHumanSize(t *testing.T) {
-	assertFromHumanSize(t, "32", false, 32)
-	assertFromHumanSize(t, "32b", false, 32)
-	assertFromHumanSize(t, "32B", false, 32)
-	assertFromHumanSize(t, "32k", false, 32*1000)
-	assertFromHumanSize(t, "32K", false, 32*1000)
-	assertFromHumanSize(t, "32kb", false, 32*1000)
-	assertFromHumanSize(t, "32Kb", false, 32*1000)
-	assertFromHumanSize(t, "32Mb", false, 32*1000*1000)
-	assertFromHumanSize(t, "32Gb", false, 32*1000*1000*1000)
-	assertFromHumanSize(t, "32Tb", false, 32*1000*1000*1000*1000)
-	assertFromHumanSize(t, "8Pb", false, 8*1000*1000*1000*1000*1000)
+	assertSuccessEquals(t, 32, FromHumanSize, "32")
+	assertSuccessEquals(t, 32, FromHumanSize, "32b")
+	assertSuccessEquals(t, 32, FromHumanSize, "32B")
+	assertSuccessEquals(t, 32*KB, FromHumanSize, "32k")
+	assertSuccessEquals(t, 32*KB, FromHumanSize, "32K")
+	assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb")
+	assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb")
+	assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb")
+	assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb")
+	assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb")
+	assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb")
 
-	assertFromHumanSize(t, "", true, -1)
-	assertFromHumanSize(t, "hello", true, -1)
-	assertFromHumanSize(t, "-32", true, -1)
-	assertFromHumanSize(t, " 32 ", true, -1)
-	assertFromHumanSize(t, "32 mb", true, -1)
-	assertFromHumanSize(t, "32m b", true, -1)
-	assertFromHumanSize(t, "32bm", true, -1)
-}
-
-func assertFromHumanSize(t *testing.T, size string, expectError bool, expectedBytes int64) {
-	actualBytes, err := FromHumanSize(size)
-	if (err != nil) && !expectError {
-		t.Errorf("Unexpected error parsing '%s': %s", size, err)
-	}
-	if (err == nil) && expectError {
-		t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes)
-	}
-	if actualBytes != expectedBytes {
-		t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes)
-	}
+	assertError(t, FromHumanSize, "")
+	assertError(t, FromHumanSize, "hello")
+	assertError(t, FromHumanSize, "-32")
+	assertError(t, FromHumanSize, "32.3")
+	assertError(t, FromHumanSize, " 32 ")
+	assertError(t, FromHumanSize, "32.3Kb")
+	assertError(t, FromHumanSize, "32 mb")
+	assertError(t, FromHumanSize, "32m b")
+	assertError(t, FromHumanSize, "32bm")
 }
 
 func TestRAMInBytes(t *testing.T) {
-	assertRAMInBytes(t, "32", false, 32)
-	assertRAMInBytes(t, "32b", false, 32)
-	assertRAMInBytes(t, "32B", false, 32)
-	assertRAMInBytes(t, "32k", false, 32*1024)
-	assertRAMInBytes(t, "32K", false, 32*1024)
-	assertRAMInBytes(t, "32kb", false, 32*1024)
-	assertRAMInBytes(t, "32Kb", false, 32*1024)
-	assertRAMInBytes(t, "32Mb", false, 32*1024*1024)
-	assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024)
+	assertSuccessEquals(t, 32, RAMInBytes, "32")
+	assertSuccessEquals(t, 32, RAMInBytes, "32b")
+	assertSuccessEquals(t, 32, RAMInBytes, "32B")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb")
+	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb")
+	assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb")
+	assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb")
+	assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB")
+	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P")
 
-	assertRAMInBytes(t, "", true, -1)
-	assertRAMInBytes(t, "hello", true, -1)
-	assertRAMInBytes(t, "-32", true, -1)
-	assertRAMInBytes(t, " 32 ", true, -1)
-	assertRAMInBytes(t, "32 mb", true, -1)
-	assertRAMInBytes(t, "32m b", true, -1)
-	assertRAMInBytes(t, "32bm", true, -1)
+	assertError(t, RAMInBytes, "")
+	assertError(t, RAMInBytes, "hello")
+	assertError(t, RAMInBytes, "-32")
+	assertError(t, RAMInBytes, "32.3")
+	assertError(t, RAMInBytes, " 32 ")
+	assertError(t, RAMInBytes, "32.3Kb")
+	assertError(t, RAMInBytes, "32 mb")
+	assertError(t, RAMInBytes, "32m b")
+	assertError(t, RAMInBytes, "32bm")
 }
 
-func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) {
-	actualBytes, err := RAMInBytes(size)
-	if (err != nil) && !expectError {
-		t.Errorf("Unexpected error parsing '%s': %s", size, err)
+func assertEquals(t *testing.T, expected, actual interface{}) {
+	if expected != actual {
+		t.Errorf("Expected '%v' but got '%v'", expected, actual)
 	}
-	if (err == nil) && expectError {
-		t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes)
+}
+
+// parseFn matches the signature of the size-parsing functions, as a testing abstraction
+type parseFn func(string) (int64, error)
+
+// String pretty-prints the parse function's name in test failure messages
+func (fn parseFn) String() string {
+	fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
+	return fnName[strings.LastIndex(fnName, ".")+1:]
+}
+
+func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) {
+	res, err := fn(arg)
+	if err != nil || res != expected {
+		t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err)
 	}
-	if actualBytes != expectedBytes {
-		t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes)
+}
+
+func assertError(t *testing.T, fn parseFn, arg string) {
+	res, err := fn(arg)
+	if err == nil && res != -1 {
+		t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res)
 	}
 }
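
The `String()` helper above leans on `runtime.FuncForPC` to recover a function's name for failure messages. The same trick in isolation (a sketch, not part of the patch):

```go
package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

// funcName returns the bare name of any function value.
func funcName(fn interface{}) string {
	full := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
	return full[strings.LastIndex(full, ".")+1:]
}

func main() {
	fmt.Println(funcName(strings.ToUpper)) // ToUpper
}
```
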
diff --git a/pkg/version/version.go b/pkg/version/version.go
index 5ff9d2e..6a7d635 100644
--- a/pkg/version/version.go
+++ b/pkg/version/version.go
@@ -12,9 +12,17 @@
 		meTab    = strings.Split(string(me), ".")
 		otherTab = strings.Split(string(other), ".")
 	)
-	for i, s := range meTab {
+
+	max := len(meTab)
+	if len(otherTab) > max {
+		max = len(otherTab)
+	}
+	for i := 0; i < max; i++ {
 		var meInt, otherInt int
-		meInt, _ = strconv.Atoi(s)
+
+		if len(meTab) > i {
+			meInt, _ = strconv.Atoi(meTab[i])
+		}
 		if len(otherTab) > i {
 			otherInt, _ = strconv.Atoi(otherTab[i])
 		}
@@ -25,9 +33,6 @@
 			return -1
 		}
 	}
-	if len(otherTab) > len(meTab) {
-		return -1
-	}
 	return 0
 }
 
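
The rewritten loop iterates to the longer of the two dotted tuples and treats missing components as zero, which is why "1.0.0" and "1" now compare equal. A standalone restatement of the padding rule (illustrative; the real logic lives in pkg/version):

```go
package main

import "fmt"

// compareDotted compares two dotted tuples, padding the shorter with zeros.
func compareDotted(a, b []int) int {
	max := len(a)
	if len(b) > max {
		max = len(b)
	}
	for i := 0; i < max; i++ {
		var x, y int
		if i < len(a) {
			x = a[i]
		}
		if i < len(b) {
			y = b[i]
		}
		switch {
		case x > y:
			return 1
		case x < y:
			return -1
		}
	}
	return 0
}

func main() {
	fmt.Println(compareDotted([]int{1, 0, 0}, []int{1})) // 0: trailing zeros are padding
	fmt.Println(compareDotted([]int{1, 0, 1}, []int{1})) // 1
}
```
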
diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go
index 27c0536..c02ec40 100644
--- a/pkg/version/version_test.go
+++ b/pkg/version/version_test.go
@@ -12,6 +12,8 @@
 
 func TestCompareVersion(t *testing.T) {
 	assertVersion(t, "1.12", "1.12", 0)
+	assertVersion(t, "1.0.0", "1", 0)
+	assertVersion(t, "1", "1.0.0", 0)
 	assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1)
 	assertVersion(t, "1", "1.0.1", -1)
 	assertVersion(t, "1.0.1", "1", 1)
diff --git a/reexec/README.md b/reexec/README.md
new file mode 100644
index 0000000..45592ce
--- /dev/null
+++ b/reexec/README.md
@@ -0,0 +1,5 @@
+## reexec
+
+The `reexec` package facilitates the busybox-style reexec of the docker binary that we
+require because of the forking limitations of using Go. Handlers can be registered under
+a name, and the argv[0] of the exec'd binary is used to find and execute custom init paths.
diff --git a/reexec/reexec.go b/reexec/reexec.go
new file mode 100644
index 0000000..136b905
--- /dev/null
+++ b/reexec/reexec.go
@@ -0,0 +1,45 @@
+package reexec
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name
+func Register(name string, initializer func()) {
+	if _, exists := registeredInitializers[name]; exists {
+		panic(fmt.Sprintf("reexec func already registered under name %q", name))
+	}
+
+	registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
+func Init() bool {
+	initializer, exists := registeredInitializers[os.Args[0]]
+	if exists {
+		initializer()
+
+		return true
+	}
+
+	return false
+}
+
+// Self returns the path to the current process's binary
+func Self() string {
+	name := os.Args[0]
+
+	if filepath.Base(name) == name {
+		if lp, err := exec.LookPath(name); err == nil {
+			name = lp
+		}
+	}
+
+	return name
+}
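
A hedged sketch of the intended call pattern; the handler name and body are hypothetical, and `Self` resolves a bare argv[0] through `exec.LookPath` as shown above:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"

	"github.com/docker/docker/reexec"
)

func init() {
	// Registered under a name; matched against os.Args[0] on re-exec.
	// "my-init" is a hypothetical handler name for this sketch.
	reexec.Register("my-init", func() {
		fmt.Println("running as the re-exec'd init")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		// We were re-exec'd as a registered initializer; nothing more to do.
		return
	}
	// Parent path: re-exec ourselves with argv[0] set to the handler name.
	cmd := &exec.Cmd{
		Path:   reexec.Self(),
		Args:   []string{"my-init"},
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```
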
diff --git a/registry/MAINTAINERS b/registry/MAINTAINERS
index af791fb..fdb03ed 100644
--- a/registry/MAINTAINERS
+++ b/registry/MAINTAINERS
@@ -1,4 +1,5 @@
-Sam Alba <sam@dotcloud.com> (@samalba)
-Joffrey Fuhrer <joffrey@dotcloud.com> (@shin-)
-Ken Cochrane <ken@dotcloud.com> (@kencochrane)
+Sam Alba <sam@docker.com> (@samalba)
+Joffrey Fuhrer <joffrey@docker.com> (@shin-)
+Ken Cochrane <ken@docker.com> (@kencochrane)
 Vincent Batts <vbatts@redhat.com> (@vbatts)
+Olivier Gambier <olivier@docker.com> (@dmp42)
diff --git a/registry/auth.go b/registry/auth.go
index 7384efb..906a37d 100644
--- a/registry/auth.go
+++ b/registry/auth.go
@@ -11,7 +11,7 @@
 	"path"
 	"strings"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/utils"
 )
 
 // Where we store the config file
@@ -20,7 +20,7 @@
 // Only used for user auth + account creation
 const INDEXSERVER = "https://index.docker.io/v1/"
 
-//const INDEXSERVER = "https://indexstaging-docker.dotcloud.com/v1/"
+//const INDEXSERVER = "https://registry-stage.hub.docker.com/v1/"
 
 var (
 	ErrConfigFileMissing = errors.New("The Auth config file is missing")
diff --git a/registry/httpfactory.go b/registry/httpfactory.go
new file mode 100644
index 0000000..4c78436
--- /dev/null
+++ b/registry/httpfactory.go
@@ -0,0 +1,46 @@
+package registry
+
+import (
+	"runtime"
+
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/utils"
+)
+
+func HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
+	// FIXME: this replicates the 'info' job.
+	httpVersion := make([]utils.VersionInfo, 0, 4)
+	httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"go", runtime.Version()})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT})
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()})
+	}
+	httpVersion = append(httpVersion, &simpleVersionInfo{"os", runtime.GOOS})
+	httpVersion = append(httpVersion, &simpleVersionInfo{"arch", runtime.GOARCH})
+	ud := utils.NewHTTPUserAgentDecorator(httpVersion...)
+	md := &utils.HTTPMetaHeadersDecorator{
+		Headers: metaHeaders,
+	}
+	factory := utils.NewHTTPRequestFactory(ud, md)
+	return factory
+}
+
+// simpleVersionInfo is a simple implementation of
+// the interface VersionInfo, which is used
+// to provide version information for some product,
+// component, etc. It stores the product name and the version
+// as strings and returns them on calls to Name() and Version().
+type simpleVersionInfo struct {
+	name    string
+	version string
+}
+
+func (v *simpleVersionInfo) Name() string {
+	return v.name
+}
+
+func (v *simpleVersionInfo) Version() string {
+	return v.version
+}
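
For illustration, a hypothetical caller of this factory; assuming the user-agent decorator behaves as named, requests it creates should carry a User-Agent assembled from the version pairs above (e.g. `docker/1.2.0 go/go1.3.1 ...`):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/registry"
)

func main() {
	// The meta header here is illustrative; these are copied onto every request.
	factory := registry.HTTPRequestFactory(map[string][]string{
		"X-Meta-Example": {"value"},
	})
	req, err := factory.NewRequest("GET", "https://registry.example.com/v1/_ping", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("User-Agent"))
}
```
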
diff --git a/registry/registry.go b/registry/registry.go
index 24c5512..9c76aca 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -1,26 +1,22 @@
 package registry
 
 import (
-	"bytes"
-	"crypto/sha256"
-	_ "crypto/sha512"
+	"crypto/tls"
+	"crypto/x509"
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io"
 	"io/ioutil"
 	"net"
 	"net/http"
-	"net/http/cookiejar"
-	"net/url"
+	"os"
+	"path"
 	"regexp"
-	"runtime"
-	"strconv"
 	"strings"
 	"time"
 
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/utils"
 )
 
 var (
@@ -29,31 +25,155 @@
 	errLoginRequired         = errors.New("Authentication is required.")
 )
 
+type TimeoutType uint32
+
+const (
+	NoTimeout TimeoutType = iota
+	ReceiveTimeout
+	ConnectTimeout
+)
+
+func newClient(jar http.CookieJar, roots *x509.CertPool, cert *tls.Certificate, timeout TimeoutType) *http.Client {
+	tlsConfig := tls.Config{RootCAs: roots}
+
+	if cert != nil {
+		tlsConfig.Certificates = append(tlsConfig.Certificates, *cert)
+	}
+
+	httpTransport := &http.Transport{
+		DisableKeepAlives: true,
+		Proxy:             http.ProxyFromEnvironment,
+		TLSClientConfig:   &tlsConfig,
+	}
+
+	switch timeout {
+	case ConnectTimeout:
+		httpTransport.Dial = func(proto string, addr string) (net.Conn, error) {
+			// Set the connect timeout to 5 seconds
+			conn, err := net.DialTimeout(proto, addr, 5*time.Second)
+			if err != nil {
+				return nil, err
+			}
+			// Set the recv timeout to 10 seconds
+			conn.SetDeadline(time.Now().Add(10 * time.Second))
+			return conn, nil
+		}
+	case ReceiveTimeout:
+		httpTransport.Dial = func(proto string, addr string) (net.Conn, error) {
+			conn, err := net.Dial(proto, addr)
+			if err != nil {
+				return nil, err
+			}
+			conn = utils.NewTimeoutConn(conn, 1*time.Minute)
+			return conn, nil
+		}
+	}
+
+	return &http.Client{
+		Transport:     httpTransport,
+		CheckRedirect: AddRequiredHeadersToRedirectedRequests,
+		Jar:           jar,
+	}
+}
+
+func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType) (*http.Response, *http.Client, error) {
+	hasFile := func(files []os.FileInfo, name string) bool {
+		for _, f := range files {
+			if f.Name() == name {
+				return true
+			}
+		}
+		return false
+	}
+
+	hostDir := path.Join("/etc/docker/certs.d", req.URL.Host)
+	fs, err := ioutil.ReadDir(hostDir)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, nil, err
+	}
+
+	var (
+		pool  *x509.CertPool
+		certs []*tls.Certificate
+	)
+
+	for _, f := range fs {
+		if strings.HasSuffix(f.Name(), ".crt") {
+			if pool == nil {
+				pool = x509.NewCertPool()
+			}
+			data, err := ioutil.ReadFile(path.Join(hostDir, f.Name()))
+			if err != nil {
+				return nil, nil, err
+			} else {
+				pool.AppendCertsFromPEM(data)
+			}
+		}
+		if strings.HasSuffix(f.Name(), ".cert") {
+			certName := f.Name()
+			keyName := certName[:len(certName)-5] + ".key"
+			if !hasFile(fs, keyName) {
+				return nil, nil, fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
+			} else {
+				cert, err := tls.LoadX509KeyPair(path.Join(hostDir, certName), path.Join(hostDir, keyName))
+				if err != nil {
+					return nil, nil, err
+				}
+				certs = append(certs, &cert)
+			}
+		}
+		if strings.HasSuffix(f.Name(), ".key") {
+			keyName := f.Name()
+			certName := keyName[:len(keyName)-4] + ".cert"
+			if !hasFile(fs, certName) {
+				return nil, nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
+			}
+		}
+	}
+
+	if len(certs) == 0 {
+		client := newClient(jar, pool, nil, timeout)
+		res, err := client.Do(req)
+		if err != nil {
+			return nil, nil, err
+		}
+		return res, client, nil
+	} else {
+		for i, cert := range certs {
+			client := newClient(jar, pool, cert, timeout)
+			res, err := client.Do(req)
+			if i == len(certs)-1 {
+				// If this is the last cert, always return the result
+				return res, client, err
+			} else {
+				// Otherwise, continue to next cert if 403 or 5xx
+				if err == nil && res.StatusCode != 403 && !(res.StatusCode >= 500 && res.StatusCode < 600) {
+					return res, client, err
+				}
+			}
+		}
+	}
+
+	return nil, nil, nil
+}
+
 func pingRegistryEndpoint(endpoint string) (RegistryInfo, error) {
 	if endpoint == IndexServerAddress() {
 		// Skip the check, we know this one is valid
 		// (and we never want to fall back to http in case of error)
 		return RegistryInfo{Standalone: false}, nil
 	}
-	httpDial := func(proto string, addr string) (net.Conn, error) {
-		// Set the connect timeout to 5 seconds
-		conn, err := net.DialTimeout(proto, addr, 5*time.Second)
-		if err != nil {
-			return nil, err
-		}
-		// Set the recv timeout to 10 seconds
-		conn.SetDeadline(time.Now().Add(10 * time.Second))
-		return conn, nil
-	}
-	httpTransport := &http.Transport{
-		Dial:  httpDial,
-		Proxy: http.ProxyFromEnvironment,
-	}
-	client := &http.Client{Transport: httpTransport}
-	resp, err := client.Get(endpoint + "_ping")
+
+	req, err := http.NewRequest("GET", endpoint+"_ping", nil)
 	if err != nil {
 		return RegistryInfo{Standalone: false}, err
 	}
+
+	resp, _, err := doRequest(req, nil, ConnectTimeout)
+	if err != nil {
+		return RegistryInfo{Standalone: false}, err
+	}
+
 	defer resp.Body.Close()
 
 	jsonString, err := ioutil.ReadAll(resp.Body)
@@ -67,17 +187,17 @@
 		Standalone: true,
 	}
 	if err := json.Unmarshal(jsonString, &info); err != nil {
-		utils.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err)
+		log.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err)
 		// don't stop here. Just assume sane defaults
 	}
 	if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
-		utils.Debugf("Registry version header: '%s'", hdr)
+		log.Debugf("Registry version header: '%s'", hdr)
 		info.Version = hdr
 	}
-	utils.Debugf("RegistryInfo.Version: %q", info.Version)
+	log.Debugf("RegistryInfo.Version: %q", info.Version)
 
 	standalone := resp.Header.Get("X-Docker-Registry-Standalone")
-	utils.Debugf("Registry standalone header: '%s'", standalone)
+	log.Debugf("Registry standalone header: '%s'", standalone)
 	// Accepted values are "true" (case-insensitive) and "1".
 	if strings.EqualFold(standalone, "true") || standalone == "1" {
 		info.Standalone = true
@@ -85,7 +205,7 @@
 		// there is a header set, and it is not "true" or "1", so assume fails
 		info.Standalone = false
 	}
-	utils.Debugf("RegistryInfo.Standalone: %q", info.Standalone)
+	log.Debugf("RegistryInfo.Standalone: %q", info.Standalone)
 	return info, nil
 }
 
@@ -155,7 +275,7 @@
 	}
 	endpoint := fmt.Sprintf("https://%s/v1/", hostname)
 	if _, err := pingRegistryEndpoint(endpoint); err != nil {
-		utils.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
+		log.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
 		endpoint = fmt.Sprintf("http://%s/v1/", hostname)
 		if _, err = pingRegistryEndpoint(endpoint); err != nil {
 			//TODO: triggering highland build can be done there without "failing"
@@ -165,597 +285,6 @@
 	return endpoint, nil
 }
 
-func setTokenAuth(req *http.Request, token []string) {
-	if req.Header.Get("Authorization") == "" { // Don't override
-		req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
-	}
-}
-
-// Retrieve the history of a given image from the Registry.
-// Return a list of the parent's json (requested image included)
-func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) {
-	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil)
-	if err != nil {
-		return nil, err
-	}
-	setTokenAuth(req, token)
-	res, err := r.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	if res.StatusCode != 200 {
-		if res.StatusCode == 401 {
-			return nil, errLoginRequired
-		}
-		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
-	}
-
-	jsonString, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, fmt.Errorf("Error while reading the http response: %s", err)
-	}
-
-	utils.Debugf("Ancestry: %s", jsonString)
-	history := new([]string)
-	if err := json.Unmarshal(jsonString, history); err != nil {
-		return nil, err
-	}
-	return *history, nil
-}
-
-// Check if an image exists in the Registry
-// TODO: This method should return the errors instead of masking them and returning false
-func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool {
-
-	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
-	if err != nil {
-		utils.Errorf("Error in LookupRemoteImage %s", err)
-		return false
-	}
-	setTokenAuth(req, token)
-	res, err := r.client.Do(req)
-	if err != nil {
-		utils.Errorf("Error in LookupRemoteImage %s", err)
-		return false
-	}
-	res.Body.Close()
-	return res.StatusCode == 200
-}
-
-// Retrieve an image from the Registry.
-func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) {
-	// Get the JSON
-	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
-	if err != nil {
-		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
-	}
-	setTokenAuth(req, token)
-	res, err := r.client.Do(req)
-	if err != nil {
-		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
-	}
-	defer res.Body.Close()
-	if res.StatusCode != 200 {
-		return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
-	}
-
-	// if the size header is not present, then set it to '-1'
-	imageSize := -1
-	if hdr := res.Header.Get("X-Docker-Size"); hdr != "" {
-		imageSize, err = strconv.Atoi(hdr)
-		if err != nil {
-			return nil, -1, err
-		}
-	}
-
-	jsonString, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
-	}
-	return jsonString, imageSize, nil
-}
-
-func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) {
-	var (
-		retries   = 5
-		headRes   *http.Response
-		hasResume bool = false
-		imageURL       = fmt.Sprintf("%simages/%s/layer", registry, imgID)
-	)
-	headReq, err := r.reqFactory.NewRequest("HEAD", imageURL, nil)
-	if err != nil {
-		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
-	}
-	setTokenAuth(headReq, token)
-	for i := 1; i <= retries; i++ {
-		headRes, err = r.client.Do(headReq)
-		if err != nil && i == retries {
-			return nil, fmt.Errorf("Eror while making head request: %s\n", err)
-		} else if err != nil {
-			time.Sleep(time.Duration(i) * 5 * time.Second)
-			continue
-		}
-		break
-	}
-
-	if headRes.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
-		hasResume = true
-	}
-
-	req, err := r.reqFactory.NewRequest("GET", imageURL, nil)
-	if err != nil {
-		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
-	}
-	setTokenAuth(req, token)
-	if hasResume {
-		utils.Debugf("server supports resume")
-		return utils.ResumableRequestReader(r.client, req, 5, imgSize), nil
-	}
-	utils.Debugf("server doesn't support resume")
-	res, err := r.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	if res.StatusCode != 200 {
-		res.Body.Close()
-		return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
-			res.StatusCode, imgID)
-	}
-	return res.Body, nil
-}
-
-func (r *Registry) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
-	if strings.Count(repository, "/") == 0 {
-		// This will be removed once the Registry supports auto-resolution on
-		// the "library" namespace
-		repository = "library/" + repository
-	}
-	for _, host := range registries {
-		endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
-		req, err := r.reqFactory.NewRequest("GET", endpoint, nil)
-
-		if err != nil {
-			return nil, err
-		}
-		setTokenAuth(req, token)
-		res, err := r.client.Do(req)
-		if err != nil {
-			return nil, err
-		}
-
-		utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
-		defer res.Body.Close()
-
-		if res.StatusCode != 200 && res.StatusCode != 404 {
-			continue
-		} else if res.StatusCode == 404 {
-			return nil, fmt.Errorf("Repository not found")
-		}
-
-		result := make(map[string]string)
-		rawJSON, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return nil, err
-		}
-		if err := json.Unmarshal(rawJSON, &result); err != nil {
-			return nil, err
-		}
-		return result, nil
-	}
-	return nil, fmt.Errorf("Could not reach any registry endpoint")
-}
-
-func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
-	var endpoints []string
-	parsedUrl, err := url.Parse(indexEp)
-	if err != nil {
-		return nil, err
-	}
-	var urlScheme = parsedUrl.Scheme
-	// The Registry's URL scheme has to match the Index'
-	for _, ep := range headers {
-		epList := strings.Split(ep, ",")
-		for _, epListElement := range epList {
-			endpoints = append(
-				endpoints,
-				fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement)))
-		}
-	}
-	return endpoints, nil
-}
-
-func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
-	indexEp := r.indexEndpoint
-	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote)
-
-	utils.Debugf("[registry] Calling GET %s", repositoryTarget)
-
-	req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil)
-	if err != nil {
-		return nil, err
-	}
-	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
-		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
-	}
-	req.Header.Set("X-Docker-Token", "true")
-
-	res, err := r.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	if res.StatusCode == 401 {
-		return nil, errLoginRequired
-	}
-	// TODO: Right now we're ignoring checksums in the response body.
-	// In the future, we need to use them to check image validity.
-	if res.StatusCode != 200 {
-		return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res)
-	}
-
-	var tokens []string
-	if res.Header.Get("X-Docker-Token") != "" {
-		tokens = res.Header["X-Docker-Token"]
-	}
-
-	var endpoints []string
-	if res.Header.Get("X-Docker-Endpoints") != "" {
-		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], indexEp)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		// Assume the endpoint is on the same host
-		u, err := url.Parse(indexEp)
-		if err != nil {
-			return nil, err
-		}
-		endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", u.Scheme, req.URL.Host))
-	}
-
-	checksumsJSON, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-	remoteChecksums := []*ImgData{}
-	if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil {
-		return nil, err
-	}
-
-	// Forge a better object from the retrieved data
-	imgsData := make(map[string]*ImgData)
-	for _, elem := range remoteChecksums {
-		imgsData[elem.ID] = elem
-	}
-
-	return &RepositoryData{
-		ImgList:   imgsData,
-		Endpoints: endpoints,
-		Tokens:    tokens,
-	}, nil
-}
-
-func (r *Registry) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error {
-
-	utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum")
-
-	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil)
-	if err != nil {
-		return err
-	}
-	setTokenAuth(req, token)
-	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
-	req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)
-
-	res, err := r.client.Do(req)
-	if err != nil {
-		return fmt.Errorf("Failed to upload metadata: %s", err)
-	}
-	defer res.Body.Close()
-	if len(res.Cookies()) > 0 {
-		r.client.Jar.SetCookies(req.URL, res.Cookies())
-	}
-	if res.StatusCode != 200 {
-		errBody, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
-		}
-		var jsonBody map[string]string
-		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
-			errBody = []byte(err.Error())
-		} else if jsonBody["error"] == "Image already exists" {
-			return ErrAlreadyExists
-		}
-		return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody)
-	}
-	return nil
-}
-
-// Push a local image to the registry
-func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error {
-
-	utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json")
-
-	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw))
-	if err != nil {
-		return err
-	}
-	req.Header.Add("Content-type", "application/json")
-	setTokenAuth(req, token)
-
-	res, err := r.client.Do(req)
-	if err != nil {
-		return fmt.Errorf("Failed to upload metadata: %s", err)
-	}
-	defer res.Body.Close()
-	if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") {
-		return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
-	}
-	if res.StatusCode != 200 {
-		errBody, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
-		}
-		var jsonBody map[string]string
-		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
-			errBody = []byte(err.Error())
-		} else if jsonBody["error"] == "Image already exists" {
-			return ErrAlreadyExists
-		}
-		return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody), res)
-	}
-	return nil
-}
-
-func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
-
-	utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer")
-
-	tarsumLayer := &utils.TarSum{Reader: layer}
-	h := sha256.New()
-	h.Write(jsonRaw)
-	h.Write([]byte{'\n'})
-	checksumLayer := &utils.CheckSum{Reader: tarsumLayer, Hash: h}
-
-	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer)
-	if err != nil {
-		return "", "", err
-	}
-	req.ContentLength = -1
-	req.TransferEncoding = []string{"chunked"}
-	setTokenAuth(req, token)
-	res, err := r.client.Do(req)
-	if err != nil {
-		return "", "", fmt.Errorf("Failed to upload layer: %s", err)
-	}
-	if rc, ok := layer.(io.Closer); ok {
-		if err := rc.Close(); err != nil {
-			return "", "", err
-		}
-	}
-	defer res.Body.Close()
-
-	if res.StatusCode != 200 {
-		errBody, err := ioutil.ReadAll(res.Body)
-		if err != nil {
-			return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
-		}
-		return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res)
-	}
-
-	checksumPayload = "sha256:" + checksumLayer.Sum()
-	return tarsumLayer.Sum(jsonRaw), checksumPayload, nil
-}
-
-// push a tag on the registry.
-// Remote has the format '<user>/<repo>
-func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
-	// "jsonify" the string
-	revision = "\"" + revision + "\""
-	path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
-
-	req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
-	if err != nil {
-		return err
-	}
-	req.Header.Add("Content-type", "application/json")
-	setTokenAuth(req, token)
-	req.ContentLength = int64(len(revision))
-	res, err := r.client.Do(req)
-	if err != nil {
-		return err
-	}
-	res.Body.Close()
-	if res.StatusCode != 200 && res.StatusCode != 201 {
-		return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res)
-	}
-	return nil
-}
-
-func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
-	cleanImgList := []*ImgData{}
-	indexEp := r.indexEndpoint
-
-	if validate {
-		for _, elem := range imgList {
-			if elem.Checksum != "" {
-				cleanImgList = append(cleanImgList, elem)
-			}
-		}
-	} else {
-		cleanImgList = imgList
-	}
-
-	imgListJSON, err := json.Marshal(cleanImgList)
-	if err != nil {
-		return nil, err
-	}
-	var suffix string
-	if validate {
-		suffix = "images"
-	}
-	u := fmt.Sprintf("%srepositories/%s/%s", indexEp, remote, suffix)
-	utils.Debugf("[registry] PUT %s", u)
-	utils.Debugf("Image list pushed to index:\n%s", imgListJSON)
-	req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON))
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Add("Content-type", "application/json")
-	req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
-	req.ContentLength = int64(len(imgListJSON))
-	req.Header.Set("X-Docker-Token", "true")
-	if validate {
-		req.Header["X-Docker-Endpoints"] = regs
-	}
-
-	res, err := r.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-
-	// Redirect if necessary
-	for res.StatusCode >= 300 && res.StatusCode < 400 {
-		utils.Debugf("Redirected to %s", res.Header.Get("Location"))
-		req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
-		if err != nil {
-			return nil, err
-		}
-		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
-		req.ContentLength = int64(len(imgListJSON))
-		req.Header.Set("X-Docker-Token", "true")
-		if validate {
-			req.Header["X-Docker-Endpoints"] = regs
-		}
-		res, err = r.client.Do(req)
-		if err != nil {
-			return nil, err
-		}
-		defer res.Body.Close()
-	}
-
-	var tokens, endpoints []string
-	if !validate {
-		if res.StatusCode != 200 && res.StatusCode != 201 {
-			errBody, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				return nil, err
-			}
-			return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody), res)
-		}
-		if res.Header.Get("X-Docker-Token") != "" {
-			tokens = res.Header["X-Docker-Token"]
-			utils.Debugf("Auth token: %v", tokens)
-		} else {
-			return nil, fmt.Errorf("Index response didn't contain an access token")
-		}
-
-		if res.Header.Get("X-Docker-Endpoints") != "" {
-			endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], indexEp)
-			if err != nil {
-				return nil, err
-			}
-		} else {
-			return nil, fmt.Errorf("Index response didn't contain any endpoints")
-		}
-	}
-	if validate {
-		if res.StatusCode != 204 {
-			errBody, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				return nil, err
-			}
-			return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody), res)
-		}
-	}
-
-	return &RepositoryData{
-		Tokens:    tokens,
-		Endpoints: endpoints,
-	}, nil
-}
-
-func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
-	utils.Debugf("Index server: %s", r.indexEndpoint)
-	u := r.indexEndpoint + "search?q=" + url.QueryEscape(term)
-	req, err := r.reqFactory.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
-		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
-	}
-	req.Header.Set("X-Docker-Token", "true")
-	res, err := r.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	if res.StatusCode != 200 {
-		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexepected status code %d", res.StatusCode), res)
-	}
-	rawData, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-	result := new(SearchResults)
-	err = json.Unmarshal(rawData, result)
-	return result, err
-}
-
-func (r *Registry) GetAuthConfig(withPasswd bool) *AuthConfig {
-	password := ""
-	if withPasswd {
-		password = r.authConfig.Password
-	}
-	return &AuthConfig{
-		Username: r.authConfig.Username,
-		Password: password,
-		Email:    r.authConfig.Email,
-	}
-}
-
-type SearchResult struct {
-	StarCount   int    `json:"star_count"`
-	IsOfficial  bool   `json:"is_official"`
-	Name        string `json:"name"`
-	IsTrusted   bool   `json:"is_trusted"`
-	Description string `json:"description"`
-}
-
-type SearchResults struct {
-	Query      string         `json:"query"`
-	NumResults int            `json:"num_results"`
-	Results    []SearchResult `json:"results"`
-}
-
-type RepositoryData struct {
-	ImgList   map[string]*ImgData
-	Endpoints []string
-	Tokens    []string
-}
-
-type ImgData struct {
-	ID              string `json:"id"`
-	Checksum        string `json:"checksum,omitempty"`
-	ChecksumPayload string `json:"-"`
-	Tag             string `json:",omitempty"`
-}
-
-type RegistryInfo struct {
-	Version    string `json:"version"`
-	Standalone bool   `json:"standalone"`
-}
-
-type Registry struct {
-	client        *http.Client
-	authConfig    *AuthConfig
-	reqFactory    *utils.HTTPRequestFactory
-	indexEndpoint string
-}
-
 func trustedLocation(req *http.Request) bool {
 	var (
 		trusteds = []string{"docker.com", "docker.io"}
@@ -789,87 +318,3 @@
 	}
 	return nil
 }
-
-func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string, timeout bool) (r *Registry, err error) {
-	httpTransport := &http.Transport{
-		DisableKeepAlives: true,
-		Proxy:             http.ProxyFromEnvironment,
-	}
-	if timeout {
-		httpTransport.Dial = func(proto string, addr string) (net.Conn, error) {
-			conn, err := net.Dial(proto, addr)
-			if err != nil {
-				return nil, err
-			}
-			conn = utils.NewTimeoutConn(conn, 1*time.Minute)
-			return conn, nil
-		}
-	}
-	r = &Registry{
-		authConfig: authConfig,
-		client: &http.Client{
-			Transport:     httpTransport,
-			CheckRedirect: AddRequiredHeadersToRedirectedRequests,
-		},
-		indexEndpoint: indexEndpoint,
-	}
-
-	r.client.Jar, err = cookiejar.New(nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
-	// alongside our requests.
-	if indexEndpoint != IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") {
-		info, err := pingRegistryEndpoint(indexEndpoint)
-		if err != nil {
-			return nil, err
-		}
-		if info.Standalone {
-			utils.Debugf("Endpoint %s is eligible for private registry registry. Enabling decorator.", indexEndpoint)
-			dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password)
-			factory.AddDecorator(dec)
-		}
-	}
-
-	r.reqFactory = factory
-	return r, nil
-}
-
-func HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
-	// FIXME: this replicates the 'info' job.
-	httpVersion := make([]utils.VersionInfo, 0, 4)
-	httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION})
-	httpVersion = append(httpVersion, &simpleVersionInfo{"go", runtime.Version()})
-	httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT})
-	if kernelVersion, err := utils.GetKernelVersion(); err == nil {
-		httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()})
-	}
-	httpVersion = append(httpVersion, &simpleVersionInfo{"os", runtime.GOOS})
-	httpVersion = append(httpVersion, &simpleVersionInfo{"arch", runtime.GOARCH})
-	ud := utils.NewHTTPUserAgentDecorator(httpVersion...)
-	md := &utils.HTTPMetaHeadersDecorator{
-		Headers: metaHeaders,
-	}
-	factory := utils.NewHTTPRequestFactory(ud, md)
-	return factory
-}
-
-// simpleVersionInfo is a simple implementation of
-// the interface VersionInfo, which is used
-// to provide version information for some product,
-// component, etc. It stores the product name and the version
-// in string and returns them on calls to Name() and Version().
-type simpleVersionInfo struct {
-	name    string
-	version string
-}
-
-func (v *simpleVersionInfo) Name() string {
-	return v.name
-}
-
-func (v *simpleVersionInfo) Version() string {
-	return v.version
-}
diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go
index 6b00751..2b4cd9d 100644
--- a/registry/registry_mock_test.go
+++ b/registry/registry_mock_test.go
@@ -3,8 +3,6 @@
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/dotcloud/docker/utils"
-	"github.com/gorilla/mux"
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -14,6 +12,10 @@
 	"strings"
 	"testing"
 	"time"
+
+	"github.com/gorilla/mux"
+
+	"github.com/docker/docker/pkg/log"
 )
 
 var (
@@ -96,7 +98,7 @@
 
 func handlerAccessLog(handler http.Handler) http.Handler {
 	logHandler := func(w http.ResponseWriter, r *http.Request) {
-		utils.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL)
+		log.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL)
 		handler.ServeHTTP(w, r)
 	}
 	return http.HandlerFunc(logHandler)
diff --git a/registry/registry_test.go b/registry/registry_test.go
index 5cec059..303879e 100644
--- a/registry/registry_test.go
+++ b/registry/registry_test.go
@@ -7,7 +7,7 @@
 	"strings"
 	"testing"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/utils"
 )
 
 var (
@@ -16,9 +16,9 @@
 	REPO     = "foo42/bar"
 )
 
-func spawnTestRegistry(t *testing.T) *Registry {
+func spawnTestRegistrySession(t *testing.T) *Session {
 	authConfig := &AuthConfig{}
-	r, err := NewRegistry(authConfig, utils.NewHTTPRequestFactory(), makeURL("/v1/"), true)
+	r, err := NewSession(authConfig, utils.NewHTTPRequestFactory(), makeURL("/v1/"), true)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -34,7 +34,7 @@
 }
 
 func TestGetRemoteHistory(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	hist, err := r.GetRemoteHistory(IMAGE_ID, makeURL("/v1/"), TOKEN)
 	if err != nil {
 		t.Fatal(err)
@@ -46,7 +46,7 @@
 }
 
 func TestLookupRemoteImage(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	found := r.LookupRemoteImage(IMAGE_ID, makeURL("/v1/"), TOKEN)
 	assertEqual(t, found, true, "Expected remote lookup to succeed")
 	found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), TOKEN)
@@ -54,7 +54,7 @@
 }
 
 func TestGetRemoteImageJSON(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	json, size, err := r.GetRemoteImageJSON(IMAGE_ID, makeURL("/v1/"), TOKEN)
 	if err != nil {
 		t.Fatal(err)
@@ -71,7 +71,7 @@
 }
 
 func TestGetRemoteImageLayer(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN, 0)
 	if err != nil {
 		t.Fatal(err)
@@ -87,7 +87,7 @@
 }
 
 func TestGetRemoteTags(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, TOKEN)
 	if err != nil {
 		t.Fatal(err)
@@ -102,7 +102,7 @@
 }
 
 func TestGetRepositoryData(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	parsedUrl, err := url.Parse(makeURL("/v1/"))
 	if err != nil {
 		t.Fatal(err)
@@ -123,7 +123,7 @@
 }
 
 func TestPushImageJSONRegistry(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	imgData := &ImgData{
 		ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
 		Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
@@ -136,7 +136,7 @@
 }
 
 func TestPushImageLayerRegistry(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	layer := strings.NewReader("")
 	_, _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{})
 	if err != nil {
@@ -145,7 +145,7 @@
 }
 
 func TestResolveRepositoryName(t *testing.T) {
-	_, _, err := ResolveRepositoryName("https://github.com/dotcloud/docker")
+	_, _, err := ResolveRepositoryName("https://github.com/docker/docker")
 	assertEqual(t, err, ErrInvalidRepositoryName, "Expected error invalid repo name")
 	ep, repo, err := ResolveRepositoryName("fooo/bar")
 	if err != nil {
@@ -171,7 +171,7 @@
 }
 
 func TestPushRegistryTag(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	err := r.PushRegistryTag("foo42/bar", IMAGE_ID, "stable", makeURL("/v1/"), TOKEN)
 	if err != nil {
 		t.Fatal(err)
@@ -179,7 +179,7 @@
 }
 
 func TestPushImageJSONIndex(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	imgData := []*ImgData{
 		{
 			ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
@@ -207,7 +207,7 @@
 }
 
 func TestSearchRepositories(t *testing.T) {
-	r := spawnTestRegistry(t)
+	r := spawnTestRegistrySession(t)
 	results, err := r.SearchRepositories("fakequery")
 	if err != nil {
 		t.Fatal(err)
diff --git a/registry/service.go b/registry/service.go
index 89a4baa..0e6f1bd 100644
--- a/registry/service.go
+++ b/registry/service.go
@@ -1,7 +1,7 @@
 package registry
 
 import (
-	"github.com/dotcloud/docker/engine"
+	"github.com/docker/docker/engine"
 )
 
 // Service exposes registry capabilities in the standard Engine
@@ -82,7 +82,15 @@
 	job.GetenvJson("authConfig", authConfig)
 	job.GetenvJson("metaHeaders", metaHeaders)
 
-	r, err := NewRegistry(authConfig, HTTPRequestFactory(metaHeaders), IndexServerAddress(), true)
+	hostname, term, err := ResolveRepositoryName(term)
+	if err != nil {
+		return job.Error(err)
+	}
+	hostname, err = ExpandAndVerifyRegistryUrl(hostname)
+	if err != nil {
+		return job.Error(err)
+	}
+	r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), hostname, true)
 	if err != nil {
 		return job.Error(err)
 	}
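Condensed into one helper, the flow this hunk gives the search job reads as below; a sketch using the names as they appear in the patch, with error handling kept minimal:

    // sessionForTerm resolves a search term to its registry hostname
    // and opens a Session against it, instead of always talking to
    // the public index.
    func sessionForTerm(authConfig *AuthConfig, metaHeaders map[string][]string, term string) (*Session, string, error) {
    	hostname, remoteName, err := ResolveRepositoryName(term)
    	if err != nil {
    		return nil, "", err
    	}
    	hostname, err = ExpandAndVerifyRegistryUrl(hostname)
    	if err != nil {
    		return nil, "", err
    	}
    	s, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), hostname, true)
    	return s, remoteName, err
    }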
diff --git a/registry/session.go b/registry/session.go
new file mode 100644
index 0000000..82b931f
--- /dev/null
+++ b/registry/session.go
@@ -0,0 +1,612 @@
+package registry
+
+import (
+	"bytes"
+	"crypto/sha256"
+	_ "crypto/sha512"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/cookiejar"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/pkg/httputils"
+	"github.com/docker/docker/pkg/log"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/utils"
+)
+
+type Session struct {
+	authConfig    *AuthConfig
+	reqFactory    *utils.HTTPRequestFactory
+	indexEndpoint string
+	jar           *cookiejar.Jar
+	timeout       TimeoutType
+}
+
+func NewSession(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string, timeout bool) (r *Session, err error) {
+	r = &Session{
+		authConfig:    authConfig,
+		indexEndpoint: indexEndpoint,
+	}
+
+	if timeout {
+		r.timeout = ReceiveTimeout
+	}
+
+	r.jar, err = cookiejar.New(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
+	// alongside our requests.
+	if indexEndpoint != IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") {
+		info, err := pingRegistryEndpoint(indexEndpoint)
+		if err != nil {
+			return nil, err
+		}
+		if info.Standalone {
+			log.Debugf("Endpoint %s is eligible for private registry registry. Enabling decorator.", indexEndpoint)
+			dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password)
+			factory.AddDecorator(dec)
+		}
+	}
+
+	r.reqFactory = factory
+	return r, nil
+}
+
+func (r *Session) doRequest(req *http.Request) (*http.Response, *http.Client, error) {
+	return doRequest(req, r.jar, r.timeout)
+}
+
+// Retrieve the history of a given image from the Registry.
+// Return a list of the parents' json (requested image included)
+func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) {
+	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil)
+	if err != nil {
+		return nil, err
+	}
+	setTokenAuth(req, token)
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		if res.StatusCode == 401 {
+			return nil, errLoginRequired
+		}
+		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
+	}
+
+	jsonString, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, fmt.Errorf("Error while reading the http response: %s", err)
+	}
+
+	log.Debugf("Ancestry: %s", jsonString)
+	history := new([]string)
+	if err := json.Unmarshal(jsonString, history); err != nil {
+		return nil, err
+	}
+	return *history, nil
+}
+
+// Check if an image exists in the Registry
+// TODO: This method should return the errors instead of masking them and returning false
+func (r *Session) LookupRemoteImage(imgID, registry string, token []string) bool {
+
+	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
+	if err != nil {
+		log.Errorf("Error in LookupRemoteImage %s", err)
+		return false
+	}
+	setTokenAuth(req, token)
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		log.Errorf("Error in LookupRemoteImage %s", err)
+		return false
+	}
+	res.Body.Close()
+	return res.StatusCode == 200
+}
+
+// Retrieve an image from the Registry.
+func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) {
+	// Get the JSON
+	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
+	if err != nil {
+		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
+	}
+	setTokenAuth(req, token)
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
+	}
+	// if the size header is not present, then set it to '-1'
+	imageSize := -1
+	if hdr := res.Header.Get("X-Docker-Size"); hdr != "" {
+		imageSize, err = strconv.Atoi(hdr)
+		if err != nil {
+			return nil, -1, err
+		}
+	}
+
+	jsonString, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
+	}
+	return jsonString, imageSize, nil
+}
+
+func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) {
+	var (
+		retries  = 5
+		client   *http.Client
+		res      *http.Response
+		imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID)
+	)
+
+	req, err := r.reqFactory.NewRequest("GET", imageURL, nil)
+	if err != nil {
+		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
+	}
+	setTokenAuth(req, token)
+	for i := 1; i <= retries; i++ {
+		res, client, err = r.doRequest(req)
+		if err != nil {
+			// res can be nil when the request itself failed, so guard
+			// the Close and surface the transport error rather than a
+			// status code we never received.
+			if res != nil {
+				res.Body.Close()
+			}
+			if i == retries {
+				return nil, fmt.Errorf("Error while fetching image layer (%s): %s", imgID, err)
+			}
+			time.Sleep(time.Duration(i) * 5 * time.Second)
+			continue
+		}
+		break
+	}
+
+	if res.StatusCode != 200 {
+		res.Body.Close()
+		return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
+			res.StatusCode, imgID)
+	}
+
+	if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
+		log.Debugf("server supports resume")
+		return httputils.ResumableRequestReaderWithInitialResponse(client, req, 5, imgSize, res), nil
+	}
+	log.Debugf("server doesn't support resume")
+	return res.Body, nil
+}
+
+func (r *Session) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
+	if strings.Count(repository, "/") == 0 {
+		// This will be removed once the Registry supports auto-resolution on
+		// the "library" namespace
+		repository = "library/" + repository
+	}
+	for _, host := range registries {
+		endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
+		req, err := r.reqFactory.NewRequest("GET", endpoint, nil)
+
+		if err != nil {
+			return nil, err
+		}
+		setTokenAuth(req, token)
+		res, _, err := r.doRequest(req)
+		if err != nil {
+			return nil, err
+		}
+
+		log.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
+		defer res.Body.Close()
+
+		if res.StatusCode != 200 && res.StatusCode != 404 {
+			continue
+		} else if res.StatusCode == 404 {
+			return nil, fmt.Errorf("Repository not found")
+		}
+
+		result := make(map[string]string)
+		rawJSON, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return nil, err
+		}
+		if err := json.Unmarshal(rawJSON, &result); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+	return nil, fmt.Errorf("Could not reach any registry endpoint")
+}
+
+func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
+	var endpoints []string
+	parsedUrl, err := url.Parse(indexEp)
+	if err != nil {
+		return nil, err
+	}
+	var urlScheme = parsedUrl.Scheme
+	// The Registry's URL scheme has to match the Index's
+	for _, ep := range headers {
+		epList := strings.Split(ep, ",")
+		for _, epListElement := range epList {
+			endpoints = append(
+				endpoints,
+				fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement)))
+		}
+	}
+	return endpoints, nil
+}
+
+func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
+	indexEp := r.indexEndpoint
+	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote)
+
+	log.Debugf("[registry] Calling GET %s", repositoryTarget)
+
+	req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil)
+	if err != nil {
+		return nil, err
+	}
+	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
+		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+	}
+	req.Header.Set("X-Docker-Token", "true")
+
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == 401 {
+		return nil, errLoginRequired
+	}
+	// TODO: Right now we're ignoring checksums in the response body.
+	// In the future, we need to use them to check image validity.
+	if res.StatusCode != 200 {
+		return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res)
+	}
+
+	var tokens []string
+	if res.Header.Get("X-Docker-Token") != "" {
+		tokens = res.Header["X-Docker-Token"]
+	}
+
+	var endpoints []string
+	if res.Header.Get("X-Docker-Endpoints") != "" {
+		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], indexEp)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// Assume the endpoint is on the same host
+		u, err := url.Parse(indexEp)
+		if err != nil {
+			return nil, err
+		}
+		endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", u.Scheme, req.URL.Host))
+	}
+
+	checksumsJSON, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+	remoteChecksums := []*ImgData{}
+	if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil {
+		return nil, err
+	}
+
+	// Forge a better object from the retrieved data
+	imgsData := make(map[string]*ImgData)
+	for _, elem := range remoteChecksums {
+		imgsData[elem.ID] = elem
+	}
+
+	return &RepositoryData{
+		ImgList:   imgsData,
+		Endpoints: endpoints,
+		Tokens:    tokens,
+	}, nil
+}
+
+func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error {
+
+	log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum")
+
+	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil)
+	if err != nil {
+		return err
+	}
+	setTokenAuth(req, token)
+	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
+	req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)
+
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return fmt.Errorf("Failed to upload metadata: %s", err)
+	}
+	defer res.Body.Close()
+	if len(res.Cookies()) > 0 {
+		r.jar.SetCookies(req.URL, res.Cookies())
+	}
+	if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
+		}
+		var jsonBody map[string]string
+		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
+			errBody = []byte(err.Error())
+		} else if jsonBody["error"] == "Image already exists" {
+			return ErrAlreadyExists
+		}
+		return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody)
+	}
+	return nil
+}
+
+// Push a local image to the registry
+func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error {
+
+	log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json")
+
+	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw))
+	if err != nil {
+		return err
+	}
+	req.Header.Add("Content-type", "application/json")
+	setTokenAuth(req, token)
+
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return fmt.Errorf("Failed to upload metadata: %s", err)
+	}
+	defer res.Body.Close()
+	if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") {
+		return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
+	}
+	if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
+		}
+		var jsonBody map[string]string
+		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
+			errBody = []byte(err.Error())
+		} else if jsonBody["error"] == "Image already exists" {
+			return ErrAlreadyExists
+		}
+		return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody), res)
+	}
+	return nil
+}
+
+func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
+
+	log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer")
+
+	tarsumLayer := &tarsum.TarSum{Reader: layer}
+	h := sha256.New()
+	h.Write(jsonRaw)
+	h.Write([]byte{'\n'})
+	checksumLayer := io.TeeReader(tarsumLayer, h)
+
+	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer)
+	if err != nil {
+		return "", "", err
+	}
+	req.Header.Add("Content-Type", "application/octet-stream")
+	req.ContentLength = -1
+	req.TransferEncoding = []string{"chunked"}
+	setTokenAuth(req, token)
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return "", "", fmt.Errorf("Failed to upload layer: %s", err)
+	}
+	if rc, ok := layer.(io.Closer); ok {
+		if err := rc.Close(); err != nil {
+			return "", "", err
+		}
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
+		}
+		return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res)
+	}
+
+	checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil))
+	return tarsumLayer.Sum(jsonRaw), checksumPayload, nil
+}
+
+// Push a tag on the registry.
+// Remote has the format '<user>/<repo>'.
+func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
+	// "jsonify" the string
+	revision = "\"" + revision + "\""
+	path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
+
+	req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
+	if err != nil {
+		return err
+	}
+	req.Header.Add("Content-type", "application/json")
+	setTokenAuth(req, token)
+	req.ContentLength = int64(len(revision))
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return err
+	}
+	res.Body.Close()
+	if res.StatusCode != 200 && res.StatusCode != 201 {
+		return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res)
+	}
+	return nil
+}
+
+func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
+	cleanImgList := []*ImgData{}
+	indexEp := r.indexEndpoint
+
+	if validate {
+		for _, elem := range imgList {
+			if elem.Checksum != "" {
+				cleanImgList = append(cleanImgList, elem)
+			}
+		}
+	} else {
+		cleanImgList = imgList
+	}
+
+	imgListJSON, err := json.Marshal(cleanImgList)
+	if err != nil {
+		return nil, err
+	}
+	var suffix string
+	if validate {
+		suffix = "images"
+	}
+	u := fmt.Sprintf("%srepositories/%s/%s", indexEp, remote, suffix)
+	log.Debugf("[registry] PUT %s", u)
+	log.Debugf("Image list pushed to index:\n%s", imgListJSON)
+	req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Content-type", "application/json")
+	req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+	req.ContentLength = int64(len(imgListJSON))
+	req.Header.Set("X-Docker-Token", "true")
+	if validate {
+		req.Header["X-Docker-Endpoints"] = regs
+	}
+
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+
+	// Redirect if necessary
+	for res.StatusCode >= 300 && res.StatusCode < 400 {
+		log.Debugf("Redirected to %s", res.Header.Get("Location"))
+		req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
+		if err != nil {
+			return nil, err
+		}
+		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+		req.ContentLength = int64(len(imgListJSON))
+		req.Header.Set("X-Docker-Token", "true")
+		if validate {
+			req.Header["X-Docker-Endpoints"] = regs
+		}
+		// Assign (not redeclare) res and err here, otherwise the loop
+		// condition keeps evaluating the stale, pre-redirect response.
+		res, _, err = r.doRequest(req)
+		if err != nil {
+			return nil, err
+		}
+		defer res.Body.Close()
+	}
+
+	var tokens, endpoints []string
+	if !validate {
+		if res.StatusCode != 200 && res.StatusCode != 201 {
+			errBody, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				return nil, err
+			}
+			return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody), res)
+		}
+		if res.Header.Get("X-Docker-Token") != "" {
+			tokens = res.Header["X-Docker-Token"]
+			log.Debugf("Auth token: %v", tokens)
+		} else {
+			return nil, fmt.Errorf("Index response didn't contain an access token")
+		}
+
+		if res.Header.Get("X-Docker-Endpoints") != "" {
+			endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], indexEp)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			return nil, fmt.Errorf("Index response didn't contain any endpoints")
+		}
+	}
+	if validate {
+		if res.StatusCode != 204 {
+			errBody, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				return nil, err
+			}
+			return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, errBody), res)
+		}
+	}
+
+	return &RepositoryData{
+		Tokens:    tokens,
+		Endpoints: endpoints,
+	}, nil
+}
+
+func (r *Session) SearchRepositories(term string) (*SearchResults, error) {
+	log.Debugf("Index server: %s", r.indexEndpoint)
+	u := r.indexEndpoint + "search?q=" + url.QueryEscape(term)
+	req, err := r.reqFactory.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
+	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
+		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
+	}
+	req.Header.Set("X-Docker-Token", "true")
+	res, _, err := r.doRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexepected status code %d", res.StatusCode), res)
+	}
+	rawData, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+	result := new(SearchResults)
+	err = json.Unmarshal(rawData, result)
+	return result, err
+}
+
+func (r *Session) GetAuthConfig(withPasswd bool) *AuthConfig {
+	password := ""
+	if withPasswd {
+		password = r.authConfig.Password
+	}
+	return &AuthConfig{
+		Username: r.authConfig.Username,
+		Password: password,
+		Email:    r.authConfig.Email,
+	}
+}
+
+func setTokenAuth(req *http.Request, token []string) {
+	if req.Header.Get("Authorization") == "" { // Don't override
+		req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	}
+}
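A rough usage sketch for the new Session type; the endpoint string and the nil token are placeholders, since real callers obtain both from GetRepositoryData:

    func listTags(s *Session) error {
    	// Placeholder endpoint; callers normally use RepositoryData.Endpoints.
    	endpoints := []string{"https://registry.example.com/v1/"}
    	tags, err := s.GetRemoteTags(endpoints, "library/busybox", nil)
    	if err != nil {
    		return err
    	}
    	for tag, imgID := range tags {
    		log.Debugf("%s -> %s", tag, imgID)
    	}
    	return nil
    }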
diff --git a/registry/types.go b/registry/types.go
new file mode 100644
index 0000000..70d55e4
--- /dev/null
+++ b/registry/types.go
@@ -0,0 +1,33 @@
+package registry
+
+type SearchResult struct {
+	StarCount   int    `json:"star_count"`
+	IsOfficial  bool   `json:"is_official"`
+	Name        string `json:"name"`
+	IsTrusted   bool   `json:"is_trusted"`
+	Description string `json:"description"`
+}
+
+type SearchResults struct {
+	Query      string         `json:"query"`
+	NumResults int            `json:"num_results"`
+	Results    []SearchResult `json:"results"`
+}
+
+type RepositoryData struct {
+	ImgList   map[string]*ImgData
+	Endpoints []string
+	Tokens    []string
+}
+
+type ImgData struct {
+	ID              string `json:"id"`
+	Checksum        string `json:"checksum,omitempty"`
+	ChecksumPayload string `json:"-"`
+	Tag             string `json:",omitempty"`
+}
+
+type RegistryInfo struct {
+	Version    string `json:"version"`
+	Standalone bool   `json:"standalone"`
+}
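The json tags above fix the v1 wire format; a decoding sketch against a hand-written sample payload (values invented for illustration):

    func decodeSample() (*SearchResults, error) {
    	raw := []byte(`{"query":"busybox","num_results":1,` +
    		`"results":[{"star_count":42,"is_official":true,` +
    		`"name":"library/busybox","is_trusted":true,"description":"tiny image"}]}`)
    	sr := new(SearchResults)
    	if err := json.Unmarshal(raw, sr); err != nil {
    		return nil, err
    	}
    	// sr.Query == "busybox"; sr.Results[0].StarCount == 42
    	return sr, nil
    }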
diff --git a/runconfig/config.go b/runconfig/config.go
index 8a069c6..c00110b 100644
--- a/runconfig/config.go
+++ b/runconfig/config.go
@@ -1,8 +1,8 @@
 package runconfig
 
 import (
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/nat"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
 )
 
 // Note: the Config structure should hold only portable information about the container.
diff --git a/runconfig/config_test.go b/runconfig/config_test.go
index 3b57b0a..7d3aa01 100644
--- a/runconfig/config_test.go
+++ b/runconfig/config_test.go
@@ -5,7 +5,7 @@
 	"strings"
 	"testing"
 
-	"github.com/dotcloud/docker/nat"
+	"github.com/docker/docker/nat"
 )
 
 func parse(t *testing.T, args string) (*Config, *HostConfig, error) {
diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go
index 79ffad7..4dd4766 100644
--- a/runconfig/hostconfig.go
+++ b/runconfig/hostconfig.go
@@ -3,9 +3,9 @@
 import (
 	"strings"
 
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/utils"
 )
 
 type NetworkMode string
@@ -19,6 +19,17 @@
 	return len(parts) > 1 && parts[0] == "container"
 }
 
+type DeviceMapping struct {
+	PathOnHost        string
+	PathInContainer   string
+	CgroupPermissions string
+}
+
+type RestartPolicy struct {
+	Name              string
+	MaximumRetryCount int
+}
+
 type HostConfig struct {
 	Binds           []string
 	ContainerIDFile string
@@ -30,7 +41,11 @@
 	Dns             []string
 	DnsSearch       []string
 	VolumesFrom     []string
+	Devices         []DeviceMapping
 	NetworkMode     NetworkMode
+	CapAdd          []string
+	CapDrop         []string
+	RestartPolicy   RestartPolicy
 }
 
 func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
@@ -40,8 +55,11 @@
 		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
 		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
 	}
+
 	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
 	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
+	job.GetenvJson("Devices", &hostConfig.Devices)
+	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
 	if Binds := job.GetenvList("Binds"); Binds != nil {
 		hostConfig.Binds = Binds
 	}
@@ -57,5 +75,12 @@
 	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
 		hostConfig.VolumesFrom = VolumesFrom
 	}
+	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
+		hostConfig.CapAdd = CapAdd
+	}
+	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
+		hostConfig.CapDrop = CapDrop
+	}
+
 	return hostConfig
 }
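Built directly in code rather than from a job environment, the new HostConfig fields compose as in this sketch (values are illustrative):

    hostConfig := &HostConfig{
    	NetworkMode: NetworkMode("bridge"),
    	Devices: []DeviceMapping{
    		{PathOnHost: "/dev/sdc", PathInContainer: "/dev/xvdc", CgroupPermissions: "rwm"},
    	},
    	CapAdd:  []string{"NET_ADMIN"},
    	CapDrop: []string{"MKNOD"},
    	RestartPolicy: RestartPolicy{
    		Name:              "on-failure",
    		MaximumRetryCount: 5,
    	},
    }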
diff --git a/runconfig/merge.go b/runconfig/merge.go
index e30b4ce..0c60d1d 100644
--- a/runconfig/merge.go
+++ b/runconfig/merge.go
@@ -3,8 +3,8 @@
 import (
 	"strings"
 
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/log"
 )
 
 func Merge(userConf, imageConf *Config) error {
@@ -20,7 +20,7 @@
 	if userConf.CpuShares == 0 {
 		userConf.CpuShares = imageConf.CpuShares
 	}
-	if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 {
+	if len(userConf.ExposedPorts) == 0 {
 		userConf.ExposedPorts = imageConf.ExposedPorts
 	} else if imageConf.ExposedPorts != nil {
 		if userConf.ExposedPorts == nil {
@@ -33,7 +33,7 @@
 		}
 	}
 
-	if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 {
+	if len(userConf.PortSpecs) > 0 {
 		if userConf.ExposedPorts == nil {
 			userConf.ExposedPorts = make(nat.PortSet)
 		}
@@ -48,9 +48,9 @@
 		}
 		userConf.PortSpecs = nil
 	}
-	if imageConf.PortSpecs != nil && len(imageConf.PortSpecs) > 0 {
+	if len(imageConf.PortSpecs) > 0 {
 		// FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia.
-		utils.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", "))
+		log.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", "))
 		if userConf.ExposedPorts == nil {
 			userConf.ExposedPorts = make(nat.PortSet)
 		}
@@ -66,7 +66,7 @@
 		}
 	}
 
-	if userConf.Env == nil || len(userConf.Env) == 0 {
+	if len(userConf.Env) == 0 {
 		userConf.Env = imageConf.Env
 	} else {
 		for _, imageEnv := range imageConf.Env {
@@ -84,16 +84,16 @@
 		}
 	}
 
-	if userConf.Cmd == nil || len(userConf.Cmd) == 0 {
-		userConf.Cmd = imageConf.Cmd
-	}
-	if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 {
+	if len(userConf.Entrypoint) == 0 {
+		if len(userConf.Cmd) == 0 {
+			userConf.Cmd = imageConf.Cmd
+		}
 		userConf.Entrypoint = imageConf.Entrypoint
 	}
 	if userConf.WorkingDir == "" {
 		userConf.WorkingDir = imageConf.WorkingDir
 	}
-	if userConf.Volumes == nil || len(userConf.Volumes) == 0 {
+	if len(userConf.Volumes) == 0 {
 		userConf.Volumes = imageConf.Volumes
 	} else {
 		for k, v := range imageConf.Volumes {
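One behavioral change in the Entrypoint/Cmd hunk above is easy to miss: the image's Cmd is now inherited only when the user set no Entrypoint, so overriding the entrypoint no longer silently reuses the image's default arguments. A sketch of the new behavior:

    func mergeSketch() {
    	userConf := &Config{Entrypoint: []string{"/bin/sh", "-c"}}
    	imageConf := &Config{Entrypoint: []string{"/entry"}, Cmd: []string{"echo", "hello"}}
    	_ = Merge(userConf, imageConf)
    	// userConf.Entrypoint is still ["/bin/sh", "-c"]: the user's value wins.
    	// userConf.Cmd stays empty: the image Cmd is not inherited
    	// because the user supplied an Entrypoint.
    }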
diff --git a/runconfig/parse.go b/runconfig/parse.go
index dfd9f4d..2b4dc63 100644
--- a/runconfig/parse.go
+++ b/runconfig/parse.go
@@ -4,21 +4,25 @@
 	"fmt"
 	"io/ioutil"
 	"path"
+	"strconv"
 	"strings"
 
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/opts"
-	flag "github.com/dotcloud/docker/pkg/mflag"
-	"github.com/dotcloud/docker/pkg/sysinfo"
-	"github.com/dotcloud/docker/pkg/units"
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/pkg/units"
+	"github.com/docker/docker/utils"
 )
 
 var (
-	ErrInvalidWorkingDirectory  = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.")
-	ErrConflictAttachDetach     = fmt.Errorf("Conflicting options: -a and -d")
-	ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d")
-	ErrConflictNetworkHostname  = fmt.Errorf("Conflicting options: -h and the network mode (--net)")
+	ErrInvalidWorkingDirectory            = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.")
+	ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
+	ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
+	ErrConflictNetworkHostname            = fmt.Errorf("Conflicting options: -h and the network mode (--net)")
+	ErrConflictHostNetworkAndLinks        = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior.")
+	ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
 )
 
 //FIXME Only used in tests
@@ -41,24 +45,27 @@
 		flVolumes = opts.NewListOpts(opts.ValidatePath)
 		flLinks   = opts.NewListOpts(opts.ValidateLink)
 		flEnv     = opts.NewListOpts(opts.ValidateEnv)
+		flDevices = opts.NewListOpts(opts.ValidatePath)
 
-		flPublish     opts.ListOpts
-		flExpose      opts.ListOpts
-		flDns         opts.ListOpts
-		flDnsSearch   = opts.NewListOpts(opts.ValidateDomain)
-		flVolumesFrom opts.ListOpts
-		flLxcOpts     opts.ListOpts
-		flEnvFile     opts.ListOpts
+		flPublish     = opts.NewListOpts(nil)
+		flExpose      = opts.NewListOpts(nil)
+		flDns         = opts.NewListOpts(opts.ValidateIPAddress)
+		flDnsSearch   = opts.NewListOpts(opts.ValidateDnsSearch)
+		flVolumesFrom = opts.NewListOpts(nil)
+		flLxcOpts     = opts.NewListOpts(nil)
+		flEnvFile     = opts.NewListOpts(nil)
+		flCapAdd      = opts.NewListOpts(nil)
+		flCapDrop     = opts.NewListOpts(nil)
 
 		flAutoRemove      = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
-		flDetach          = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id")
+		flDetach          = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run container in the background and print new container ID")
 		flNetwork         = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container")
 		flPrivileged      = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container")
 		flPublishAll      = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces")
-		flStdin           = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached")
-		flTty             = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty")
+		flStdin           = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached")
+		flTty             = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY")
 		flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file")
-		flEntrypoint      = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image")
+		flEntrypoint      = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image")
 		flHostname        = cmd.String([]string{"h", "-hostname"}, "", "Container host name")
 		flMemoryString    = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
 		flUser            = cmd.String([]string{"u", "-user"}, "", "Username or UID")
@@ -66,24 +73,29 @@
 		flCpuShares       = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
 		flCpuset          = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)")
 		flNetMode         = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:<name|id>': reuses another container network stack\n'host': use the host network stack inside the container.  Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.")
+		flRestartPolicy   = cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits (no, on-failure, always)")
 		// For documentation purpose
-		_ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify received signals to the process (even in non-tty mode). SIGCHLD is not proxied.")
+		_ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.")
 		_ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
 	)
 
-	cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.")
-	cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g., from the host: -v /host:/container, from docker: -v /container)")
-	cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)")
+	cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR.")
+	cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)")
+	cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container in the form of name:alias")
+	cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc)")
 	cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables")
-	cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of ENV variables")
+	cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of environment variables")
 
 	cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host\nformat: %s\n(use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat))
 	cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host")
-	cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers")
-	cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains")
+	cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers")
+	cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains")
 	cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)")
 	cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
 
+	cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities")
+	cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities")
+
 	if err := cmd.Parse(args); err != nil {
 		return nil, nil, cmd, err
 	}
@@ -108,6 +120,10 @@
 		return nil, nil, cmd, ErrConflictNetworkHostname
 	}
 
+	if *flNetMode == "host" && flLinks.Len() > 0 {
+		return nil, nil, cmd, ErrConflictHostNetworkAndLinks
+	}
+
 	// If neither -d or -a are set, attach to everything by default
 	if flAttach.Len() == 0 && !*flDetach {
 		if !*flDetach {
@@ -191,6 +207,16 @@
 		}
 	}
 
+	// parse device mappings
+	deviceMappings := []DeviceMapping{}
+	for _, device := range flDevices.GetAll() {
+		deviceMapping, err := ParseDevice(device)
+		if err != nil {
+			return nil, nil, cmd, err
+		}
+		deviceMappings = append(deviceMappings, deviceMapping)
+	}
+
 	// collect all the environment variables for the container
 	envVariables := []string{}
 	for _, ef := range flEnvFile.GetAll() {
@@ -202,14 +228,21 @@
 	}
 	// parse the '-e' and '--env' after, to allow override
 	envVariables = append(envVariables, flEnv.GetAll()...)
-	// boo, there's no debug output for docker run
-	//utils.Debugf("Environment variables for the container: %#v", envVariables)
 
 	netMode, err := parseNetMode(*flNetMode)
 	if err != nil {
 		return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err)
 	}
 
+	restartPolicy, err := parseRestartPolicy(*flRestartPolicy)
+	if err != nil {
+		return nil, nil, cmd, err
+	}
+
+	if *flAutoRemove && (restartPolicy.Name == "always" || restartPolicy.Name == "on-failure") {
+		return nil, nil, cmd, ErrConflictRestartPolicyAndAutoRemove
+	}
+
 	config := &Config{
 		Hostname:        hostname,
 		Domainname:      domainname,
@@ -245,6 +278,10 @@
 		DnsSearch:       flDnsSearch.GetAll(),
 		VolumesFrom:     flVolumesFrom.GetAll(),
 		NetworkMode:     netMode,
+		Devices:         deviceMappings,
+		CapAdd:          flCapAdd.GetAll(),
+		CapDrop:         flCapDrop.GetAll(),
+		RestartPolicy:   restartPolicy,
 	}
 
 	if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit {
@@ -259,6 +296,46 @@
 	return config, hostConfig, cmd, nil
 }
 
+// parseRestartPolicy returns the parsed policy or an error indicating what is incorrect
+func parseRestartPolicy(policy string) (RestartPolicy, error) {
+	p := RestartPolicy{}
+
+	if policy == "" {
+		return p, nil
+	}
+
+	var (
+		parts = strings.Split(policy, ":")
+		name  = parts[0]
+	)
+
+	switch name {
+	case "always":
+		p.Name = name
+
+		if len(parts) == 2 {
+			return p, fmt.Errorf("maximum restart count not valid with restart policy of \"always\"")
+		}
+	case "no":
+		// do nothing
+	case "on-failure":
+		p.Name = name
+
+		if len(parts) == 2 {
+			count, err := strconv.Atoi(parts[1])
+			if err != nil {
+				return p, err
+			}
+
+			p.MaximumRetryCount = count
+		}
+	default:
+		return p, fmt.Errorf("invalid restart policy %s", name)
+	}
+
+	return p, nil
+}
+
 // options will come in the format of name.key=value or name.option
 func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) {
 	out := make(map[string][]string, len(opts.GetAll()))
@@ -281,7 +358,7 @@
 func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) {
 	out := make([]utils.KeyValuePair, opts.Len())
 	for i, o := range opts.GetAll() {
-		k, v, err := utils.ParseKeyValueOpt(o)
+		k, v, err := parsers.ParseKeyValueOpt(o)
 		if err != nil {
 			return nil, err
 		}
@@ -303,3 +380,33 @@
 	}
 	return NetworkMode(netMode), nil
 }
+
+func ParseDevice(device string) (DeviceMapping, error) {
+	src := ""
+	dst := ""
+	permissions := "rwm"
+	arr := strings.Split(device, ":")
+	switch len(arr) {
+	case 3:
+		permissions = arr[2]
+		fallthrough
+	case 2:
+		dst = arr[1]
+		fallthrough
+	case 1:
+		src = arr[0]
+	default:
+		return DeviceMapping{}, fmt.Errorf("Invalid device specification: %s", device)
+	}
+
+	if dst == "" {
+		dst = src
+	}
+
+	deviceMapping := DeviceMapping{
+		PathOnHost:        src,
+		PathInContainer:   dst,
+		CgroupPermissions: permissions,
+	}
+	return deviceMapping, nil
+}
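The two new parsers in sketch form, fed inputs that match the flag help text above:

    func parseSketch() error {
    	dm, err := ParseDevice("/dev/sdc:/dev/xvdc:rwm")
    	if err != nil {
    		return err
    	}
    	// dm == DeviceMapping{PathOnHost: "/dev/sdc",
    	//     PathInContainer: "/dev/xvdc", CgroupPermissions: "rwm"}

    	rp, err := parseRestartPolicy("on-failure:3")
    	if err != nil {
    		return err
    	}
    	// rp == RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}
    	_, _ = dm, rp
    	return nil
    }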
diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go
index 94cea49..aa6e4f2 100644
--- a/runconfig/parse_test.go
+++ b/runconfig/parse_test.go
@@ -3,14 +3,14 @@
 import (
 	"testing"
 
-	"github.com/dotcloud/docker/utils"
+	"github.com/docker/docker/pkg/parsers"
 )
 
 func TestParseLxcConfOpt(t *testing.T) {
 	opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
 
 	for _, o := range opts {
-		k, v, err := utils.ParseKeyValueOpt(o)
+		k, v, err := parsers.ParseKeyValueOpt(o)
 		if err != nil {
 			t.FailNow()
 		}
diff --git a/server/MAINTAINERS b/server/MAINTAINERS
deleted file mode 100644
index e35518a..0000000
--- a/server/MAINTAINERS
+++ /dev/null
@@ -1,2 +0,0 @@
-Solomon Hykes <solomon@docker.com> (@shykes)
-Victor Vieux <vieux@docker.com> (@vieux)
diff --git a/server/server.go b/server/server.go
deleted file mode 100644
index 3e6de00..0000000
--- a/server/server.go
+++ /dev/null
@@ -1,2473 +0,0 @@
-// DEPRECATION NOTICE. PLEASE DO NOT ADD ANYTHING TO THIS FILE.
-//
-// server/server.go is deprecated. We are working on breaking it up into smaller, cleaner
-// pieces which will be easier to find and test. This will help make the code less
-// redundant and more readable.
-//
-// Contributors, please don't add anything to server/server.go, unless it has the explicit
-// goal of helping the deprecation effort.
-//
-// Maintainers, please refuse patches which add code to server/server.go.
-//
-// Instead try the following files:
-// * For code related to local image management, try graph/
-// * For code related to image downloading, uploading, remote search etc, try registry/
-// * For code related to the docker daemon, try daemon/
-// * For small utilities which could potentially be useful outside of Docker, try pkg/
-// * For miscalleneous "util" functions which are docker-specific, try encapsulating them
-//     inside one of the subsystems above. If you really think they should be more widely
-//     available, are you sure you can't remove the docker dependencies and move them to
-//     pkg? In last resort, you can add them to utils/ (but please try not to).
-
-package server
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"os/exec"
-	gosignal "os/signal"
-	"path"
-	"path/filepath"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"syscall"
-	"time"
-
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/daemon"
-	"github.com/dotcloud/docker/daemonconfig"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/graph"
-	"github.com/dotcloud/docker/image"
-	"github.com/dotcloud/docker/pkg/graphdb"
-	"github.com/dotcloud/docker/pkg/signal"
-	"github.com/dotcloud/docker/pkg/tailfile"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
-	"github.com/dotcloud/docker/utils/filters"
-)
-
-func (srv *Server) handlerWrap(h engine.Handler) engine.Handler {
-	return func(job *engine.Job) engine.Status {
-		if !srv.IsRunning() {
-			return job.Errorf("Server is not running")
-		}
-		srv.tasks.Add(1)
-		defer srv.tasks.Done()
-		return h(job)
-	}
-}
-
-// jobInitApi runs the remote api server `srv` as a daemon,
-// Only one api server can run at the same time - this is enforced by a pidfile.
-// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
-func InitServer(job *engine.Job) engine.Status {
-	job.Logf("Creating server")
-	srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
-	if err != nil {
-		return job.Error(err)
-	}
-	if srv.daemon.Config().Pidfile != "" {
-		job.Logf("Creating pidfile")
-		if err := utils.CreatePidFile(srv.daemon.Config().Pidfile); err != nil {
-			// FIXME: do we need fatal here instead of returning a job error?
-			log.Fatal(err)
-		}
-	}
-	job.Logf("Setting up signal traps")
-	c := make(chan os.Signal, 1)
-	gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
-	go func() {
-		interruptCount := uint32(0)
-		for sig := range c {
-			go func(sig os.Signal) {
-				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
-				switch sig {
-				case os.Interrupt, syscall.SIGTERM:
-					// If the user really wants to interrupt, let him do so.
-					if atomic.LoadUint32(&interruptCount) < 3 {
-						atomic.AddUint32(&interruptCount, 1)
-						// Initiate the cleanup only once
-						if atomic.LoadUint32(&interruptCount) == 1 {
-							utils.RemovePidFile(srv.daemon.Config().Pidfile)
-							srv.Close()
-						} else {
-							return
-						}
-					} else {
-						log.Printf("Force shutdown of docker, interrupting cleanup\n")
-					}
-				case syscall.SIGQUIT:
-				}
-				os.Exit(128 + int(sig.(syscall.Signal)))
-			}(sig)
-		}
-	}()
-	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
-	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
-
-	for name, handler := range map[string]engine.Handler{
-		"export":           srv.ContainerExport,
-		"create":           srv.ContainerCreate,
-		"stop":             srv.ContainerStop,
-		"restart":          srv.ContainerRestart,
-		"start":            srv.ContainerStart,
-		"kill":             srv.ContainerKill,
-		"pause":            srv.ContainerPause,
-		"unpause":          srv.ContainerUnpause,
-		"wait":             srv.ContainerWait,
-		"tag":              srv.ImageTag, // FIXME merge with "image_tag"
-		"resize":           srv.ContainerResize,
-		"commit":           srv.ContainerCommit,
-		"info":             srv.DockerInfo,
-		"container_delete": srv.ContainerDestroy,
-		"image_export":     srv.ImageExport,
-		"images":           srv.Images,
-		"history":          srv.ImageHistory,
-		"viz":              srv.ImagesViz,
-		"container_copy":   srv.ContainerCopy,
-		"attach":           srv.ContainerAttach,
-		"logs":             srv.ContainerLogs,
-		"changes":          srv.ContainerChanges,
-		"top":              srv.ContainerTop,
-		"load":             srv.ImageLoad,
-		"build":            srv.Build,
-		"pull":             srv.ImagePull,
-		"import":           srv.ImageImport,
-		"image_delete":     srv.ImageDelete,
-		"events":           srv.Events,
-		"push":             srv.ImagePush,
-		"containers":       srv.Containers,
-	} {
-		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
-			return job.Error(err)
-		}
-	}
-	// Install image-related commands from the image subsystem.
-	// See `graph/service.go`
-	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
-		return job.Error(err)
-	}
-	// Install daemon-related commands from the daemon subsystem.
-	// See `daemon/`
-	if err := srv.daemon.Install(job.Eng); err != nil {
-		return job.Error(err)
-	}
-	srv.SetRunning(true)
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerPause(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
-	}
-	name := job.Args[0]
-	container := srv.daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-	if err := container.Pause(); err != nil {
-		return job.Errorf("Cannot pause container %s: %s", name, err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerUnpause(job *engine.Job) engine.Status {
-	if n := len(job.Args); n < 1 || n > 2 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
-	}
-	name := job.Args[0]
-	container := srv.daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-	if err := container.Unpause(); err != nil {
-		return job.Errorf("Cannot unpause container %s: %s", name, err)
-	}
-	return engine.StatusOK
-}
-
-// ContainerKill send signal to the container
-// If no signal is given (sig 0), then Kill with SIGKILL and wait
-// for the container to exit.
-// If a signal is given, then just send it to the container and return.
-func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
-	if n := len(job.Args); n < 1 || n > 2 {
-		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		sig  uint64
-		err  error
-	)
-
-	// If we have a signal, look at it. Otherwise, do nothing
-	if len(job.Args) == 2 && job.Args[1] != "" {
-		// Check if we passed the signal as a number:
-		// The largest legal signal is 31, so let's parse on 5 bits
-		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
-		if err != nil {
-			// The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
-			sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
-			if sig == 0 {
-				return job.Errorf("Invalid signal: %s", job.Args[1])
-			}
-
-		}
-	}
-
-	if container := srv.daemon.Get(name); container != nil {
-		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
-		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
-			if err := container.Kill(); err != nil {
-				return job.Errorf("Cannot kill container %s: %s", name, err)
-			}
-			srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-		} else {
-			// Otherwise, just send the requested signal
-			if err := container.KillSig(int(sig)); err != nil {
-				return job.Errorf("Cannot kill container %s: %s", name, err)
-			}
-			// FIXME: Add event for signals
-		}
-	} else {
-		return job.Errorf("No such container: %s", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) Events(job *engine.Job) engine.Status {
-	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s", job.Name)
-	}
-
-	var (
-		since   = job.GetenvInt64("since")
-		until   = job.GetenvInt64("until")
-		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
-	)
-
-	// If no until, disable timeout
-	if until == 0 {
-		timeout.Stop()
-	}
-
-	listener := make(chan utils.JSONMessage)
-	srv.eventPublisher.Subscribe(listener)
-	defer srv.eventPublisher.Unsubscribe(listener)
-
-	// When sending an event JSON serialization errors are ignored, but all
-	// other errors lead to the eviction of the listener.
-	sendEvent := func(event *utils.JSONMessage) error {
-		if b, err := json.Marshal(event); err == nil {
-			if _, err = job.Stdout.Write(b); err != nil {
-				return err
-			}
-		}
-		return nil
-	}
-
-	job.Stdout.Write(nil)
-
-	// Resend every event in the [since, until] time interval.
-	if since != 0 {
-		for _, event := range srv.GetEvents() {
-			if event.Time >= since && (event.Time <= until || until == 0) {
-				if err := sendEvent(&event); err != nil {
-					return job.Error(err)
-				}
-			}
-		}
-	}
-
-	for {
-		select {
-		case event, ok := <-listener:
-			if !ok {
-				return engine.StatusOK
-			}
-			if err := sendEvent(&event); err != nil {
-				return job.Error(err)
-			}
-		case <-timeout.C:
-			return engine.StatusOK
-		}
-	}
-}
-
-func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
-	}
-	name := job.Args[0]
-	if container := srv.daemon.Get(name); container != nil {
-		data, err := container.Export()
-		if err != nil {
-			return job.Errorf("%s: %s", name, err)
-		}
-		defer data.Close()
-
-		// Stream the entire contents of the container (basically a volatile snapshot)
-		if _, err := io.Copy(job.Stdout, data); err != nil {
-			return job.Errorf("%s: %s", name, err)
-		}
-		// FIXME: factor job-specific LogEvent to engine.Job.Run()
-		srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-		return engine.StatusOK
-	}
-	return job.Errorf("No such container: %s", name)
-}
-
-// ImageExport exports all images with the given tag. All versions
-// containing the same tag are exported. The resulting output is an
-// uncompressed tar ball.
-// name is the set of tags to export.
-// out is the writer where the images are written to.
-func (srv *Server) ImageExport(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s IMAGE\n", job.Name)
-	}
-	name := job.Args[0]
-	// get image json
-	tempdir, err := ioutil.TempDir("", "docker-export-")
-	if err != nil {
-		return job.Error(err)
-	}
-	defer os.RemoveAll(tempdir)
-
-	utils.Debugf("Serializing %s", name)
-
-	rootRepo, err := srv.daemon.Repositories().Get(name)
-	if err != nil {
-		return job.Error(err)
-	}
-	if rootRepo != nil {
-		for _, id := range rootRepo {
-			if err := srv.exportImage(job.Eng, id, tempdir); err != nil {
-				return job.Error(err)
-			}
-		}
-
-		// write repositories
-		rootRepoMap := map[string]graph.Repository{}
-		rootRepoMap[name] = rootRepo
-		rootRepoJson, err := json.Marshal(rootRepoMap)
-		if err != nil {
-			return job.Error(err)
-		}
-
-		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
-			return job.Error(err)
-		}
-	} else {
-		if err := srv.exportImage(job.Eng, name, tempdir); err != nil {
-			return job.Error(err)
-		}
-	}
-
-	fs, err := archive.Tar(tempdir, archive.Uncompressed)
-	if err != nil {
-		return job.Error(err)
-	}
-	defer fs.Close()
-
-	if _, err := io.Copy(job.Stdout, fs); err != nil {
-		return job.Error(err)
-	}
-	utils.Debugf("End Serializing %s", name)
-	return engine.StatusOK
-}
-
-func (srv *Server) exportImage(eng *engine.Engine, name, tempdir string) error {
-	for n := name; n != ""; {
-		// temporary directory
-		tmpImageDir := path.Join(tempdir, n)
-		if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
-			if os.IsExist(err) {
-				return nil
-			}
-			return err
-		}
-
-		var version = "1.0"
-		var versionBuf = []byte(version)
-
-		if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
-			return err
-		}
-
-		// serialize json
-		json, err := os.Create(path.Join(tmpImageDir, "json"))
-		if err != nil {
-			return err
-		}
-		job := eng.Job("image_inspect", n)
-		job.SetenvBool("raw", true)
-		job.Stdout.Add(json)
-		if err := job.Run(); err != nil {
-			return err
-		}
-
-		// serialize filesystem
-		fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
-		if err != nil {
-			return err
-		}
-		job = eng.Job("image_tarlayer", n)
-		job.Stdout.Add(fsTar)
-		if err := job.Run(); err != nil {
-			return err
-		}
-
-		// find parent
-		job = eng.Job("image_get", n)
-		info, _ := job.Stdout.AddEnv()
-		if err := job.Run(); err != nil {
-			return err
-		}
-		n = info.Get("Parent")
-	}
-	return nil
-}
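-
-// Taken together, ImageExport and exportImage produce an archive with the
-// following layout (a sketch derived from the code above, one directory per
-// image in the parent chain, not a formal specification):
-//
-//	repositories          JSON map of {name: {tag: image ID}}
-//	<image ID>/VERSION    always "1.0"
-//	<image ID>/json       raw image metadata
-//	<image ID>/layer.tar  the filesystem layer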
-
-func (srv *Server) Build(job *engine.Job) engine.Status {
-	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s\n", job.Name)
-	}
-	var (
-		remoteURL      = job.Getenv("remote")
-		repoName       = job.Getenv("t")
-		suppressOutput = job.GetenvBool("q")
-		noCache        = job.GetenvBool("nocache")
-		rm             = job.GetenvBool("rm")
-		forceRm        = job.GetenvBool("forcerm")
-		authConfig     = &registry.AuthConfig{}
-		configFile     = &registry.ConfigFile{}
-		tag            string
-		context        io.ReadCloser
-	)
-	job.GetenvJson("authConfig", authConfig)
-	job.GetenvJson("configFile", configFile)
-	repoName, tag = utils.ParseRepositoryTag(repoName)
-
-	if remoteURL == "" {
-		context = ioutil.NopCloser(job.Stdin)
-	} else if utils.IsGIT(remoteURL) {
-		if !strings.HasPrefix(remoteURL, "git://") {
-			remoteURL = "https://" + remoteURL
-		}
-		root, err := ioutil.TempDir("", "docker-build-git")
-		if err != nil {
-			return job.Error(err)
-		}
-		defer os.RemoveAll(root)
-
-		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
-			return job.Errorf("Error trying to use git: %s (%s)", err, output)
-		}
-
-		c, err := archive.Tar(root, archive.Uncompressed)
-		if err != nil {
-			return job.Error(err)
-		}
-		context = c
-	} else if utils.IsURL(remoteURL) {
-		f, err := utils.Download(remoteURL)
-		if err != nil {
-			return job.Error(err)
-		}
-		defer f.Body.Close()
-		dockerFile, err := ioutil.ReadAll(f.Body)
-		if err != nil {
-			return job.Error(err)
-		}
-		c, err := archive.Generate("Dockerfile", string(dockerFile))
-		if err != nil {
-			return job.Error(err)
-		}
-		context = c
-	} else {
-		return job.Errorf("Invalid remote URL for the build context: %s", remoteURL)
-	}
-	defer context.Close()
-
-	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
-	b := NewBuildFile(srv,
-		&utils.StdoutFormater{
-			Writer:          job.Stdout,
-			StreamFormatter: sf,
-		},
-		&utils.StderrFormater{
-			Writer:          job.Stdout,
-			StreamFormatter: sf,
-		},
-		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
-	id, err := b.Build(context)
-	if err != nil {
-		return job.Error(err)
-	}
-	if repoName != "" {
-		srv.daemon.Repositories().Set(repoName, tag, id, false)
-	}
-	return engine.StatusOK
-}
-
-// Loads a set of images into the repository. This is the complement of ImageExport.
-// The input stream is an uncompressed tar ball containing images and metadata.
-func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
-	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
-	if err != nil {
-		return job.Error(err)
-	}
-	defer os.RemoveAll(tmpImageDir)
-
-	var (
-		repoTarFile = path.Join(tmpImageDir, "repo.tar")
-		repoDir     = path.Join(tmpImageDir, "repo")
-	)
-
-	tarFile, err := os.Create(repoTarFile)
-	if err != nil {
-		return job.Error(err)
-	}
-	if _, err := io.Copy(tarFile, job.Stdin); err != nil {
-		return job.Error(err)
-	}
-	tarFile.Close()
-
-	repoFile, err := os.Open(repoTarFile)
-	if err != nil {
-		return job.Error(err)
-	}
-	if err := os.Mkdir(repoDir, os.FileMode(0755)); err != nil {
-		return job.Error(err)
-	}
-	if err := archive.Untar(repoFile, repoDir, nil); err != nil {
-		return job.Error(err)
-	}
-
-	dirs, err := ioutil.ReadDir(repoDir)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	for _, d := range dirs {
-		if d.IsDir() {
-			if err := srv.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
-				return job.Error(err)
-			}
-		}
-	}
-
-	repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
-	if err == nil {
-		repositories := map[string]graph.Repository{}
-		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
-			return job.Error(err)
-		}
-
-		for imageName, tagMap := range repositories {
-			for tag, address := range tagMap {
-				if err := srv.daemon.Repositories().Set(imageName, tag, address, true); err != nil {
-					return job.Error(err)
-				}
-			}
-		}
-	} else if !os.IsNotExist(err) {
-		return job.Error(err)
-	}
-
-	return engine.StatusOK
-}
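-
-// ImageLoad is the inverse of ImageExport above, so piping one into the other
-// should round-trip an image. On the CLI this pair is what backs `docker save`
-// and `docker load`, e.g. (usage sketch):
-//
-//	docker save busybox | docker load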
-
-func (srv *Server) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
-	if err := eng.Job("image_get", address).Run(); err != nil {
-		utils.Debugf("Loading %s", address)
-
-		imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
-		if err != nil {
-			utils.Debugf("Error reading json", err)
-			return err
-		}
-
-		layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
-		if err != nil {
-			utils.Debugf("Error reading embedded tar", err)
-			return err
-		}
-		img, err := image.NewImgJSON(imageJson)
-		if err != nil {
-			utils.Debugf("Error unmarshalling json", err)
-			return err
-		}
-		if img.Parent != "" {
-			if !srv.daemon.Graph().Exists(img.Parent) {
-				if err := srv.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
-					return err
-				}
-			}
-		}
-		if err := srv.daemon.Graph().Register(imageJson, layer, img); err != nil {
-			return err
-		}
-	}
-	utils.Debugf("Completed processing %s", address)
-
-	return nil
-}
-
-func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
-	images, _ := srv.daemon.Graph().Map()
-	if images == nil {
-		return engine.StatusOK
-	}
-	job.Stdout.Write([]byte("digraph docker {\n"))
-
-	var (
-		parentImage *image.Image
-		err         error
-	)
-	for _, image := range images {
-		parentImage, err = image.GetParent()
-		if err != nil {
-			return job.Errorf("Error while getting parent image: %v", err)
-		}
-		if parentImage != nil {
-			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
-		} else {
-			job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
-		}
-	}
-
-	for id, repos := range srv.daemon.Repositories().GetRepoRefs() {
-		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
-	}
-	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
-	return engine.StatusOK
-}
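-
-// A sketch of the DOT output emitted above, with IDs shortened for
-// readability (the real output uses full-length IDs):
-//
-//	digraph docker {
-//	 "d131e0fa2585" -> "9fd3c8c9af32"
-//	 base -> "d131e0fa2585" [style=invis]
-//	 "9fd3c8c9af32" [label="9fd3c8c9af32\nubuntu:latest",shape=box,fillcolor="paleturquoise",style="filled,rounded"];
-//	 base [style=invisible]
-//	}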
-
-func (srv *Server) Images(job *engine.Job) engine.Status {
-	var (
-		allImages   map[string]*image.Image
-		err         error
-		filt_tagged = true
-	)
-
-	imageFilters, err := filters.FromParam(job.Getenv("filters"))
-	if err != nil {
-		return job.Error(err)
-	}
-	if i, ok := imageFilters["dangling"]; ok {
-		for _, value := range i {
-			if strings.ToLower(value) == "true" {
-				filt_tagged = false
-			}
-		}
-	}
-
-	if job.GetenvBool("all") && filt_tagged {
-		allImages, err = srv.daemon.Graph().Map()
-	} else {
-		allImages, err = srv.daemon.Graph().Heads()
-	}
-	if err != nil {
-		return job.Error(err)
-	}
-	lookup := make(map[string]*engine.Env)
-	srv.daemon.Repositories().Lock()
-	for name, repository := range srv.daemon.Repositories().Repositories {
-		if job.Getenv("filter") != "" {
-			if match, _ := path.Match(job.Getenv("filter"), name); !match {
-				continue
-			}
-		}
-		for tag, id := range repository {
-			image, err := srv.daemon.Graph().Get(id)
-			if err != nil {
-				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
-				continue
-			}
-
-			if out, exists := lookup[id]; exists {
-				if filt_tagged {
-					out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
-				}
-			} else {
-				// this image is tagged, so it is not a dangling candidate;
-				// drop it from allImages and emit it here only when tagged
-				// images are requested
-				delete(allImages, id)
-				if filt_tagged {
-					out := &engine.Env{}
-					out.Set("ParentId", image.Parent)
-					out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
-					out.Set("Id", image.ID)
-					out.SetInt64("Created", image.Created.Unix())
-					out.SetInt64("Size", image.Size)
-					out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
-					lookup[id] = out
-				}
-			}
-
-		}
-	}
-	srv.daemon.Repositories().Unlock()
-
-	outs := engine.NewTable("Created", len(lookup))
-	for _, value := range lookup {
-		outs.Add(value)
-	}
-
-	// Display images which aren't part of a repository/tag
-	if job.Getenv("filter") == "" {
-		for _, image := range allImages {
-			out := &engine.Env{}
-			out.Set("ParentId", image.Parent)
-			out.SetList("RepoTags", []string{"<none>:<none>"})
-			out.Set("Id", image.ID)
-			out.SetInt64("Created", image.Created.Unix())
-			out.SetInt64("Size", image.Size)
-			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
-			outs.Add(out)
-		}
-	}
-
-	outs.ReverseSort()
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
-	images, _ := srv.daemon.Graph().Map()
-	var imgcount int
-	if images == nil {
-		imgcount = 0
-	} else {
-		imgcount = len(images)
-	}
-	kernelVersion := "<unknown>"
-	if kv, err := utils.GetKernelVersion(); err == nil {
-		kernelVersion = kv.String()
-	}
-
-	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
-	initPath := utils.DockerInitPath("")
-	if initPath == "" {
-		// if that fails, we'll just return the path from the daemon
-		initPath = srv.daemon.SystemInitPath()
-	}
-
-	v := &engine.Env{}
-	v.SetInt("Containers", len(srv.daemon.List()))
-	v.SetInt("Images", imgcount)
-	v.Set("Driver", srv.daemon.GraphDriver().String())
-	v.SetJson("DriverStatus", srv.daemon.GraphDriver().Status())
-	v.SetBool("MemoryLimit", srv.daemon.SystemConfig().MemoryLimit)
-	v.SetBool("SwapLimit", srv.daemon.SystemConfig().SwapLimit)
-	v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled)
-	v.SetBool("Debug", os.Getenv("DEBUG") != "")
-	v.SetInt("NFd", utils.GetTotalUsedFds())
-	v.SetInt("NGoroutines", runtime.NumGoroutine())
-	v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name())
-	v.SetInt("NEventsListener", srv.eventPublisher.SubscribersCount())
-	v.Set("KernelVersion", kernelVersion)
-	v.Set("IndexServerAddress", registry.IndexServerAddress())
-	v.Set("InitSha1", dockerversion.INITSHA1)
-	v.Set("InitPath", initPath)
-	v.SetList("Sockets", srv.daemon.Sockets)
-	if _, err := v.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
-	}
-	name := job.Args[0]
-	foundImage, err := srv.daemon.Repositories().LookupImage(name)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	lookupMap := make(map[string][]string)
-	for name, repository := range srv.daemon.Repositories().Repositories {
-		for tag, id := range repository {
-			// Record every name:tag that references this ID, for the reverse lookup
-			if _, exists := lookupMap[id]; !exists {
-				lookupMap[id] = []string{}
-			}
-			lookupMap[id] = append(lookupMap[id], name+":"+tag)
-		}
-	}
-
-	outs := engine.NewTable("Created", 0)
-	err = foundImage.WalkHistory(func(img *image.Image) error {
-		out := &engine.Env{}
-		out.Set("Id", img.ID)
-		out.SetInt64("Created", img.Created.Unix())
-		out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
-		out.SetList("Tags", lookupMap[img.ID])
-		out.SetInt64("Size", img.Size)
-		outs.Add(out)
-		return nil
-	})
-	if err != nil {
-		return job.Error(err)
-	}
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 && len(job.Args) != 2 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
-	}
-	var (
-		name   = job.Args[0]
-		psArgs = "-ef"
-	)
-
-	if len(job.Args) == 2 && job.Args[1] != "" {
-		psArgs = job.Args[1]
-	}
-
-	if container := srv.daemon.Get(name); container != nil {
-		if !container.State.IsRunning() {
-			return job.Errorf("Container %s is not running", name)
-		}
-		pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
-		if err != nil {
-			return job.Error(err)
-		}
-		output, err := exec.Command("ps", psArgs).Output()
-		if err != nil {
-			return job.Errorf("Error running ps: %s", err)
-		}
-
-		lines := strings.Split(string(output), "\n")
-		header := strings.Fields(lines[0])
-		out := &engine.Env{}
-		out.SetList("Titles", header)
-
-		pidIndex := -1
-		for i, name := range header {
-			if name == "PID" {
-				pidIndex = i
-			}
-		}
-		if pidIndex == -1 {
-			return job.Errorf("Couldn't find PID field in ps output")
-		}
-
-		processes := [][]string{}
-		for _, line := range lines[1:] {
-			if len(line) == 0 {
-				continue
-			}
-			fields := strings.Fields(line)
-			p, err := strconv.Atoi(fields[pidIndex])
-			if err != nil {
-				return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
-			}
-
-			for _, pid := range pids {
-				if pid == p {
-					// Make sure number of fields equals number of header titles
-					// merging "overhanging" fields
-					process := fields[:len(header)-1]
-					process = append(process, strings.Join(fields[len(header)-1:], " "))
-					processes = append(processes, process)
-				}
-			}
-		}
-		out.SetJson("Processes", processes)
-		out.WriteTo(job.Stdout)
-		return engine.StatusOK
-
-	}
-	return job.Errorf("No such container: %s", name)
-}
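-
-// A worked example of the "overhanging fields" merge above: with the header
-// UID PID PPID C STIME TTY TIME CMD (8 titles), a ps line such as
-//
-//	root 1 0 0 10:00 ? 00:00:00 /bin/sh -c sleep 1000
-//
-// splits into 11 fields; the last four are rejoined so CMD becomes
-// "/bin/sh -c sleep 1000" and the row lines up with the 8 column titles.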
-
-func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
-	}
-	name := job.Args[0]
-	if container := srv.daemon.Get(name); container != nil {
-		outs := engine.NewTable("", 0)
-		changes, err := container.Changes()
-		if err != nil {
-			return job.Error(err)
-		}
-		for _, change := range changes {
-			out := &engine.Env{}
-			if err := out.Import(change); err != nil {
-				return job.Error(err)
-			}
-			outs.Add(out)
-		}
-		if _, err := outs.WriteListTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-	} else {
-		return job.Errorf("No such container: %s", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) Containers(job *engine.Job) engine.Status {
-	var (
-		foundBefore bool
-		displayed   int
-		all         = job.GetenvBool("all")
-		since       = job.Getenv("since")
-		before      = job.Getenv("before")
-		n           = job.GetenvInt("limit")
-		size        = job.GetenvBool("size")
-	)
-	outs := engine.NewTable("Created", 0)
-
-	names := map[string][]string{}
-	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
-		names[e.ID()] = append(names[e.ID()], p)
-		return nil
-	}, -1)
-
-	var beforeCont, sinceCont *daemon.Container
-	if before != "" {
-		beforeCont = srv.daemon.Get(before)
-		if beforeCont == nil {
-			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
-		}
-	}
-
-	if since != "" {
-		sinceCont = srv.daemon.Get(since)
-		if sinceCont == nil {
-			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
-		}
-	}
-
-	for _, container := range srv.daemon.List() {
-		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
-			continue
-		}
-		if before != "" && !foundBefore {
-			if container.ID == beforeCont.ID {
-				foundBefore = true
-			}
-			continue
-		}
-		if n > 0 && displayed == n {
-			break
-		}
-		if since != "" {
-			if container.ID == sinceCont.ID {
-				break
-			}
-		}
-		displayed++
-		out := &engine.Env{}
-		out.Set("Id", container.ID)
-		out.SetList("Names", names[container.ID])
-		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
-		if len(container.Args) > 0 {
-			args := []string{}
-			for _, arg := range container.Args {
-				if strings.Contains(arg, " ") {
-					args = append(args, fmt.Sprintf("'%s'", arg))
-				} else {
-					args = append(args, arg)
-				}
-			}
-			argsAsString := strings.Join(args, " ")
-
-			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
-		} else {
-			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
-		}
-		out.SetInt64("Created", container.Created.Unix())
-		out.Set("Status", container.State.String())
-		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
-		if err != nil {
-			return job.Error(err)
-		}
-		out.Set("Ports", str)
-		if size {
-			sizeRw, sizeRootFs := container.GetSize()
-			out.SetInt64("SizeRw", sizeRw)
-			out.SetInt64("SizeRootFs", sizeRootFs)
-		}
-		outs.Add(out)
-	}
-	outs.ReverseSort()
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
-	}
-	name := job.Args[0]
-
-	container := srv.daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-
-	var (
-		config    = container.Config
-		newConfig runconfig.Config
-	)
-
-	if err := job.GetenvJson("config", &newConfig); err != nil {
-		return job.Error(err)
-	}
-
-	if err := runconfig.Merge(&newConfig, config); err != nil {
-		return job.Error(err)
-	}
-
-	img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
-	if err != nil {
-		return job.Error(err)
-	}
-	job.Printf("%s\n", img.ID)
-	return engine.StatusOK
-}
-
-func (srv *Server) ImageTag(job *engine.Job) engine.Status {
-	if len(job.Args) != 2 && len(job.Args) != 3 {
-		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
-	}
-	var tag string
-	if len(job.Args) == 3 {
-		tag = job.Args[2]
-	}
-	if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
-	history, err := r.GetRemoteHistory(imgID, endpoint, token)
-	if err != nil {
-		return err
-	}
-	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
-	// FIXME: Try to stream the images?
-	// FIXME: Launch the getRemoteImage() in goroutines
-
-	for i := len(history) - 1; i >= 0; i-- {
-		id := history[i]
-
-		// ensure no two downloads of the same layer happen at the same time
-		if c, err := srv.poolAdd("pull", "layer:"+id); err != nil {
-			utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
-			<-c
-		}
-		defer srv.poolRemove("pull", "layer:"+id)
-
-		if !srv.daemon.Graph().Exists(id) {
-			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
-			var (
-				imgJSON []byte
-				imgSize int
-				err     error
-				img     *image.Image
-			)
-			retries := 5
-			for j := 1; j <= retries; j++ {
-				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
-				if err != nil && j == retries {
-					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
-					return err
-				} else if err != nil {
-					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
-					continue
-				}
-				img, err = image.NewImgJSON(imgJSON)
-				if err != nil && j == retries {
-					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
-					return fmt.Errorf("Failed to parse json: %s", err)
-				} else if err != nil {
-					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
-					continue
-				} else {
-					break
-				}
-			}
-
-			for j := 1; j <= retries; j++ {
-				// Get the layer
-				status := "Pulling fs layer"
-				if j > 1 {
-					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
-				}
-				out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
-				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
-				if uerr, ok := err.(*url.Error); ok {
-					err = uerr.Err
-				}
-				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
-					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
-					continue
-				} else if err != nil {
-					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
-					return err
-				}
-				defer layer.Close()
-
-				err = srv.daemon.Graph().Register(imgJSON,
-					utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"),
-					img)
-				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
-					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
-					continue
-				} else if err != nil {
-					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
-					return err
-				} else {
-					break
-				}
-			}
-		}
-		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
-
-	}
-	return nil
-}
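-
-// Note on the retry loops above: the backoff is linear in the attempt number
-// (j * 500ms), so with retries = 5 the sleeps between attempts are 0.5s, 1s,
-// 1.5s and 2s, after which the last error is returned.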
-
-func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
-	out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
-
-	repoData, err := r.GetRepositoryData(remoteName)
-	if err != nil {
-		return err
-	}
-
-	utils.Debugf("Retrieving the tag list")
-	tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
-	if err != nil {
-		utils.Errorf("%v", err)
-		return err
-	}
-
-	for tag, id := range tagsList {
-		repoData.ImgList[id] = &registry.ImgData{
-			ID:       id,
-			Tag:      tag,
-			Checksum: "",
-		}
-	}
-
-	utils.Debugf("Registering tags")
-	// If no tag has been specified, pull them all
-	if askedTag == "" {
-		for tag, id := range tagsList {
-			repoData.ImgList[id].Tag = tag
-		}
-	} else {
-		// Otherwise, check that the tag exists and use only that one
-		id, exists := tagsList[askedTag]
-		if !exists {
-			return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
-		}
-		repoData.ImgList[id].Tag = askedTag
-	}
-
-	errors := make(chan error)
-	for _, image := range repoData.ImgList {
-		downloadImage := func(img *registry.ImgData) {
-			if askedTag != "" && img.Tag != askedTag {
-				utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
-				if parallel {
-					errors <- nil
-				}
-				return
-			}
-
-			if img.Tag == "" {
-				utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
-				if parallel {
-					errors <- nil
-				}
-				return
-			}
-
-			// ensure no two downloads of the same image happen at the same time
-			if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
-				if c != nil {
-					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
-					<-c
-					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
-				} else {
-					utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
-				}
-				if parallel {
-					errors <- nil
-				}
-				return
-			}
-			defer srv.poolRemove("pull", "img:"+img.ID)
-
-			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
-			success := false
-			var lastErr error
-			for _, ep := range repoData.Endpoints {
-				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
-				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
-					// It's not ideal that only the last error is returned; it would be better to concatenate the errors.
-					// Since each error is also written to the output stream, the user still sees every failure.
-					lastErr = err
-					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
-					continue
-				}
-				success = true
-				break
-			}
-			if !success {
-				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, localName, lastErr)
-				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil))
-				if parallel {
-					errors <- err
-					return
-				}
-			}
-			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
-
-			if parallel {
-				errors <- nil
-			}
-		}
-
-		if parallel {
-			go downloadImage(image)
-		} else {
-			downloadImage(image)
-		}
-	}
-	if parallel {
-		var lastError error
-		for i := 0; i < len(repoData.ImgList); i++ {
-			if err := <-errors; err != nil {
-				lastError = err
-			}
-		}
-		if lastError != nil {
-			return lastError
-		}
-
-	}
-	for tag, id := range tagsList {
-		if askedTag != "" && tag != askedTag {
-			continue
-		}
-		if err := srv.daemon.Repositories().Set(localName, tag, id, true); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) {
-	srv.Lock()
-	defer srv.Unlock()
-
-	if c, exists := srv.pullingPool[key]; exists {
-		return c, fmt.Errorf("pull %s is already in progress", key)
-	}
-	if c, exists := srv.pushingPool[key]; exists {
-		return c, fmt.Errorf("push %s is already in progress", key)
-	}
-
-	c := make(chan struct{})
-	switch kind {
-	case "pull":
-		srv.pullingPool[key] = c
-	case "push":
-		srv.pushingPool[key] = c
-	default:
-		return nil, fmt.Errorf("Unknown pool type")
-	}
-	return c, nil
-}
-
-func (srv *Server) poolRemove(kind, key string) error {
-	srv.Lock()
-	defer srv.Unlock()
-	switch kind {
-	case "pull":
-		if c, exists := srv.pullingPool[key]; exists {
-			close(c)
-			delete(srv.pullingPool, key)
-		}
-	case "push":
-		if c, exists := srv.pushingPool[key]; exists {
-			close(c)
-			delete(srv.pushingPool, key)
-		}
-	default:
-		return fmt.Errorf("Unknown pool type")
-	}
-	return nil
-}
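-
-// A minimal sketch of how the poolAdd/poolRemove pair is used elsewhere in
-// this file (the key and function name are hypothetical): poolAdd either
-// claims the key or hands back the channel of the operation already in
-// flight, which the caller waits on instead of duplicating the work.
-func (srv *Server) examplePoolUsage() {
-	const exampleKey = "layer:deadbeef"
-	if c, err := srv.poolAdd("pull", exampleKey); err != nil {
-		// Another goroutine already owns this key; wait for it to finish.
-		<-c
-		return
-	}
-	defer srv.poolRemove("pull", exampleKey)
-	// ... perform the actual download here ...
-}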
-
-func (srv *Server) ImagePull(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 && n != 2 {
-		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
-	}
-	var (
-		localName   = job.Args[0]
-		tag         string
-		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
-		authConfig  = &registry.AuthConfig{}
-		metaHeaders map[string][]string
-	)
-	if len(job.Args) > 1 {
-		tag = job.Args[1]
-	}
-
-	job.GetenvJson("authConfig", authConfig)
-	job.GetenvJson("metaHeaders", &metaHeaders)
-
-	c, err := srv.poolAdd("pull", localName+":"+tag)
-	if err != nil {
-		if c != nil {
-			// Another pull of the same repository is already taking place; just wait for it to finish
-			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
-			<-c
-			return engine.StatusOK
-		}
-		return job.Error(err)
-	}
-	defer srv.poolRemove("pull", localName+":"+tag)
-
-	// Resolve the Repository name from fqn to endpoint + name
-	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	r, err := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	if endpoint == registry.IndexServerAddress() {
-		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
-		localName = remoteName
-	}
-
-	if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
-		return job.Error(err)
-	}
-
-	return engine.StatusOK
-}
-
-// Retrieve all the images to be uploaded, in the correct order
-func (srv *Server) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
-	var (
-		imageList   []string
-		imagesSeen  map[string]bool     = make(map[string]bool)
-		tagsByImage map[string][]string = make(map[string][]string)
-	)
-
-	for tag, id := range localRepo {
-		if requestedTag != "" && requestedTag != tag {
-			continue
-		}
-		var imageListForThisTag []string
-
-		tagsByImage[id] = append(tagsByImage[id], tag)
-
-		for img, err := srv.daemon.Graph().Get(id); img != nil; img, err = img.GetParent() {
-			if err != nil {
-				return nil, nil, err
-			}
-
-			if imagesSeen[img.ID] {
-				// This image is already on the list, we can ignore it and all its parents
-				break
-			}
-
-			imagesSeen[img.ID] = true
-			imageListForThisTag = append(imageListForThisTag, img.ID)
-		}
-
-		// reverse the image list for this tag (so the "most"-parent image is first)
-		for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
-			imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
-		}
-
-		// append to main image list
-		imageList = append(imageList, imageListForThisTag...)
-	}
-	if len(imageList) == 0 {
-		return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
-	}
-	utils.Debugf("Image list: %v", imageList)
-	utils.Debugf("Tags by image: %v", tagsByImage)
-
-	return imageList, tagsByImage, nil
-}
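-
-// Ordering sketch for getImageList: if tag "latest" points at image C with
-// ancestry A -> B -> C (A being the base), the function returns
-// imageList = [A, B, C] and tagsByImage = {C: ["latest"]}, so parents are
-// always uploaded before their children.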
-
-func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
-	out = utils.NewWriteFlusher(out)
-	utils.Debugf("Local repo: %s", localRepo)
-	imgList, tagsByImage, err := srv.getImageList(localRepo, tag)
-	if err != nil {
-		return err
-	}
-
-	out.Write(sf.FormatStatus("", "Sending image list"))
-
-	var (
-		repoData   *registry.RepositoryData
-		imageIndex []*registry.ImgData
-	)
-
-	for _, imgId := range imgList {
-		if tags, exists := tagsByImage[imgId]; exists {
-			// If an image has tags, an entry must be added to the image
-			// index for each tag
-			for _, tag := range tags {
-				imageIndex = append(imageIndex, &registry.ImgData{
-					ID:  imgId,
-					Tag: tag,
-				})
-			}
-		} else {
-			// If the image does not have a tag it still needs to be sent to the
-			// registry with an empty tag so that it is associated with the repository
-			imageIndex = append(imageIndex, &registry.ImgData{
-				ID:  imgId,
-				Tag: "",
-			})
-
-		}
-	}
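-
-	// Sketch of the resulting index for an image X tagged "v1" and "v2"
-	// plus an untagged ancestor P (IDs symbolic):
-	//
-	//	[{ID: X, Tag: "v1"}, {ID: X, Tag: "v2"}, {ID: P, Tag: ""}]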
-
-	utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
-	for _, data := range imageIndex {
-		utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
-	}
-
-	// Register all the images in a repository with the registry
-	// If an image is not in this list it will not be associated with the repository
-	repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
-	if err != nil {
-		return err
-	}
-
-	nTag := 1
-	if tag == "" {
-		nTag = len(localRepo)
-	}
-	for _, ep := range repoData.Endpoints {
-		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))
-
-		for _, imgId := range imgList {
-			if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
-				out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
-			} else {
-				if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
-					// FIXME: Continue on error?
-					return err
-				}
-			}
-
-			for _, tag := range tagsByImage[imgId] {
-				out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))
-
-				if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
-	out = utils.NewWriteFlusher(out)
-	jsonRaw, err := ioutil.ReadFile(path.Join(srv.daemon.Graph().Root, imgID, "json"))
-	if err != nil {
-		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
-	}
-	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))
-
-	imgData := &registry.ImgData{
-		ID: imgID,
-	}
-
-	// Send the json
-	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
-		if err == registry.ErrAlreadyExists {
-			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
-			return "", nil
-		}
-		return "", err
-	}
-
-	layerData, err := srv.daemon.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
-	if err != nil {
-		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
-	}
-	defer os.RemoveAll(layerData.Name())
-
-	// Send the layer
-	utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
-
-	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
-	if err != nil {
-		return "", err
-	}
-	imgData.Checksum = checksum
-	imgData.ChecksumPayload = checksumPayload
-	// Send the checksum
-	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
-		return "", err
-	}
-
-	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
-	return imgData.Checksum, nil
-}
-
-// FIXME: Allow interrupting the current push when a new push of the same image is requested.
-func (srv *Server) ImagePush(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
-	}
-	var (
-		localName   = job.Args[0]
-		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
-		authConfig  = &registry.AuthConfig{}
-		metaHeaders map[string][]string
-	)
-
-	tag := job.Getenv("tag")
-	job.GetenvJson("authConfig", authConfig)
-	job.GetenvJson("metaHeaders", &metaHeaders)
-	if _, err := srv.poolAdd("push", localName); err != nil {
-		return job.Error(err)
-	}
-	defer srv.poolRemove("push", localName)
-
-	// Resolve the Repository name from fqn to endpoint + name
-	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	img, err := srv.daemon.Graph().Get(localName)
-	r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
-	if err2 != nil {
-		return job.Error(err2)
-	}
-
-	if err != nil {
-		reposLen := 1
-		if tag == "" {
-			reposLen = len(srv.daemon.Repositories().Repositories[localName])
-		}
-		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
-		// If it fails, try to get the repository
-		if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
-			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
-				return job.Error(err)
-			}
-			return engine.StatusOK
-		}
-		return job.Error(err)
-	}
-
-	var token []string
-	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
-	if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ImageImport(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 2 && n != 3 {
-		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
-	}
-	var (
-		src     = job.Args[0]
-		repo    = job.Args[1]
-		tag     string
-		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
-		archive archive.ArchiveReader
-		resp    *http.Response
-	)
-	if len(job.Args) > 2 {
-		tag = job.Args[2]
-	}
-
-	if src == "-" {
-		archive = job.Stdin
-	} else {
-		u, err := url.Parse(src)
-		if err != nil {
-			return job.Error(err)
-		}
-		if u.Scheme == "" {
-			u.Scheme = "http"
-			u.Host = src
-			u.Path = ""
-		}
-		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
-		// Download with curl (pretty progress bar)
-		// If curl is not available, fall back to http.Get()
-		resp, err = utils.Download(u.String())
-		if err != nil {
-			return job.Error(err)
-		}
-		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
-		defer progressReader.Close()
-		archive = progressReader
-	}
-	img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
-	if err != nil {
-		return job.Error(err)
-	}
-	// Optionally register the image at REPO/TAG
-	if repo != "" {
-		if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
-			return job.Error(err)
-		}
-	}
-	job.Stdout.Write(sf.FormatStatus("", img.ID))
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
-	var name string
-	if len(job.Args) == 1 {
-		name = job.Args[0]
-	} else if len(job.Args) > 1 {
-		return job.Errorf("Usage: %s", job.Name)
-	}
-	config := runconfig.ContainerConfigFromJob(job)
-	if config.Memory != 0 && config.Memory < 524288 {
-		return job.Errorf("Minimum memory limit allowed is 512k")
-	}
-	if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
-		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
-		config.Memory = 0
-	}
-	if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
-		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
-		config.MemorySwap = -1
-	}
-	container, buildWarnings, err := srv.daemon.Create(config, name)
-	if err != nil {
-		if srv.daemon.Graph().IsNotExist(err) {
-			_, tag := utils.ParseRepositoryTag(config.Image)
-			if tag == "" {
-				tag = graph.DEFAULTTAG
-			}
-			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
-		}
-		return job.Error(err)
-	}
-	if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
-		job.Errorf("IPv4 forwarding is disabled.\n")
-	}
-	srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-	// FIXME: this is necessary because daemon.Create might return a nil container
-	// with a non-nil error. This should not happen! Once it's fixed we
-	// can remove this workaround.
-	if container != nil {
-		job.Printf("%s\n", container.ID)
-	}
-	for _, warning := range buildWarnings {
-		job.Errorf("%s\n", warning)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		t    = 10
-	)
-	if job.EnvExists("t") {
-		t = job.GetenvInt("t")
-	}
-	if container := srv.daemon.Get(name); container != nil {
-		if err := container.Restart(int(t)); err != nil {
-			return job.Errorf("Cannot restart container %s: %s\n", name, err)
-		}
-		srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-	} else {
-		return job.Errorf("No such container: %s\n", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
-	}
-	name := job.Args[0]
-	removeVolume := job.GetenvBool("removeVolume")
-	removeLink := job.GetenvBool("removeLink")
-	forceRemove := job.GetenvBool("forceRemove")
-
-	container := srv.daemon.Get(name)
-
-	if removeLink {
-		if container == nil {
-			return job.Errorf("No such link: %s", name)
-		}
-		name, err := daemon.GetFullContainerName(name)
-		if err != nil {
-			return job.Error(err)
-		}
-		parent, n := path.Split(name)
-		if parent == "/" {
-			return job.Errorf("Conflict, cannot remove the default name of the container")
-		}
-		pe := srv.daemon.ContainerGraph().Get(parent)
-		if pe == nil {
-			return job.Errorf("Cannot get parent %s for name %s", parent, name)
-		}
-		parentContainer := srv.daemon.Get(pe.ID())
-
-		if parentContainer != nil {
-			parentContainer.DisableLink(n)
-		}
-
-		if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	}
-
-	if container != nil {
-		if container.State.IsRunning() {
-			if forceRemove {
-				if err := container.Stop(5); err != nil {
-					return job.Errorf("Could not stop running container, cannot remove - %v", err)
-				}
-			} else {
-				return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
-			}
-		}
-		if err := srv.daemon.Destroy(container); err != nil {
-			return job.Errorf("Cannot destroy container %s: %s", name, err)
-		}
-		srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-
-		if removeVolume {
-			var (
-				volumes     = make(map[string]struct{})
-				binds       = make(map[string]struct{})
-				usedVolumes = make(map[string]*daemon.Container)
-			)
-
-			// the volume id is always the base of the path
-			getVolumeId := func(p string) string {
-				return filepath.Base(strings.TrimSuffix(p, "/layer"))
-			}
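-			// For illustration (the path root is assumed; only the trailing
-			// "<id>/layer" shape matters):
-			//
-			//	getVolumeId("/var/lib/docker/vfs/dir/<id>/layer") == "<id>"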
-
-			// populate bind map so that they can be skipped and not removed
-			for _, bind := range container.HostConfig().Binds {
-				source := strings.Split(bind, ":")[0]
-				// TODO: refactor all volume stuff, all of it
-				// It is very important that we eval the symlink here; otherwise
-				// comparing the keys to container.Volumes will not work.
-				//
-				// EvalSymlinks can fail (ref #5244); if we receive an "is not
-				// exist" error we can safely ignore it.
-				p, err := filepath.EvalSymlinks(source)
-				if err != nil && !os.IsNotExist(err) {
-					return job.Error(err)
-				}
-				if p != "" {
-					source = p
-				}
-				binds[source] = struct{}{}
-			}
-
-			// Store all the deleted containers volumes
-			for _, volumeId := range container.Volumes {
-				// Skip volumes mounted from external bind mounts; their
-				// sources were already symlink-resolved above
-				if _, exists := binds[volumeId]; exists {
-					continue
-				}
-
-				volumeId = getVolumeId(volumeId)
-				volumes[volumeId] = struct{}{}
-			}
-
-			// Retrieve all volumes from all remaining containers
-			for _, container := range srv.daemon.List() {
-				for _, containerVolumeId := range container.Volumes {
-					containerVolumeId = getVolumeId(containerVolumeId)
-					usedVolumes[containerVolumeId] = container
-				}
-			}
-
-			for volumeId := range volumes {
-				// If the requested volume is still used by another container, skip it
-				if c, exists := usedVolumes[volumeId]; exists {
-					log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
-					continue
-				}
-				if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
-					return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
-				}
-			}
-		}
-	} else {
-		return job.Errorf("No such container: %s", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, noprune bool) error {
-	var (
-		repoName, tag string
-		tags          = []string{}
-		tagDeleted    bool
-	)
-
-	repoName, tag = utils.ParseRepositoryTag(name)
-	if tag == "" {
-		tag = graph.DEFAULTTAG
-	}
-
-	img, err := srv.daemon.Repositories().LookupImage(name)
-	if err != nil {
-		if r, _ := srv.daemon.Repositories().Get(repoName); r != nil {
-			return fmt.Errorf("No such image: %s:%s", repoName, tag)
-		}
-		return fmt.Errorf("No such image: %s", name)
-	}
-
-	if strings.Contains(img.ID, name) {
-		repoName = ""
-		tag = ""
-	}
-
-	byParents, err := srv.daemon.Graph().ByParent()
-	if err != nil {
-		return err
-	}
-
-	// If deleting by ID, check whether the ID belongs to exactly one repository
-	if repoName == "" {
-		for _, repoAndTag := range srv.daemon.Repositories().ByID()[img.ID] {
-			parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
-			if repoName == "" || repoName == parsedRepo {
-				repoName = parsedRepo
-				if parsedTag != "" {
-					tags = append(tags, parsedTag)
-				}
-			} else if repoName != parsedRepo && !force {
-				// the id belongs to multiple repos, like base:latest and user:test,
-				// in that case return conflict
-				return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
-			}
-		}
-	} else {
-		tags = append(tags, tag)
-	}
-
-	if !first && len(tags) > 0 {
-		return nil
-	}
-
-	// Untag the current image
-	for _, tag := range tags {
-		tagDeleted, err = srv.daemon.Repositories().Delete(repoName, tag)
-		if err != nil {
-			return err
-		}
-		if tagDeleted {
-			out := &engine.Env{}
-			out.Set("Untagged", repoName+":"+tag)
-			imgs.Add(out)
-			srv.LogEvent("untag", img.ID, "")
-		}
-	}
-	tags = srv.daemon.Repositories().ByID()[img.ID]
-	if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
-		if len(byParents[img.ID]) == 0 {
-			if err := srv.canDeleteImage(img.ID, force, tagDeleted); err != nil {
-				return err
-			}
-			if err := srv.daemon.Repositories().DeleteAll(img.ID); err != nil {
-				return err
-			}
-			if err := srv.daemon.Graph().Delete(img.ID); err != nil {
-				return err
-			}
-			out := &engine.Env{}
-			out.Set("Deleted", img.ID)
-			imgs.Add(out)
-			srv.LogEvent("delete", img.ID, "")
-			if img.Parent != "" && !noprune {
-				err := srv.DeleteImage(img.Parent, imgs, false, force, noprune)
-				if first {
-					return err
-				}
-
-			}
-
-		}
-	}
-	return nil
-}
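-
-// Pruning sketch: deleting "foo:latest" first untags it; if the image is then
-// left with no tags and no children, the image itself is deleted and the same
-// check is applied to its parent (unless noprune is set), stopping at the
-// first ancestor that is still tagged or still has other children.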
-
-func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
-	}
-	imgs := engine.NewTable("", 0)
-	if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
-		return job.Error(err)
-	}
-	if len(imgs.Data) == 0 {
-		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
-	}
-	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) canDeleteImage(imgID string, force, untagged bool) error {
-	var message string
-	if untagged {
-		message = " (docker untagged the image)"
-	}
-	for _, container := range srv.daemon.List() {
-		parent, err := srv.daemon.Repositories().LookupImage(container.Image)
-		if err != nil {
-			return err
-		}
-
-		if err := parent.WalkHistory(func(p *image.Image) error {
-			if imgID == p.ID {
-				if container.State.IsRunning() {
-					if force {
-						return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it%s, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
-					}
-					return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it%s, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
-				} else if !force {
-					return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it%s, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
-				}
-			}
-			return nil
-		}); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
-	// Retrieve all images
-	images, err := srv.daemon.Graph().Map()
-	if err != nil {
-		return nil, err
-	}
-
-	// Store the tree in a map of maps (map[parentId][childId])
-	imageMap := make(map[string]map[string]struct{})
-	for _, img := range images {
-		if _, exists := imageMap[img.Parent]; !exists {
-			imageMap[img.Parent] = make(map[string]struct{})
-		}
-		imageMap[img.Parent][img.ID] = struct{}{}
-	}
-
-	// Loop on the children of the given image and check the config
-	var match *image.Image
-	for elem := range imageMap[imgID] {
-		img, err := srv.daemon.Graph().Get(elem)
-		if err != nil {
-			return nil, err
-		}
-		if runconfig.Compare(&img.ContainerConfig, config) {
-			if match == nil || match.Created.Before(img.Created) {
-				match = img
-			}
-		}
-	}
-	return match, nil
-}
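-
-// Shape sketch for the cache lookup above: given images A (no parent) and
-// B, C (both children of A), the map is imageMap = {"": {A}, "A": {B, C}},
-// and ImageGetCached("A", config) compares config against B and C, returning
-// the most recently created match (or nil).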
-
-func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
-	if len(job.Args) < 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
-	}
-	var (
-		name      = job.Args[0]
-		daemon    = srv.daemon
-		container = daemon.Get(name)
-	)
-
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-
-	if container.State.IsRunning() {
-		return job.Errorf("Container already started")
-	}
-
-	// If no environment was set, then no hostconfig was passed.
-	if len(job.Environ()) > 0 {
-		hostConfig := runconfig.ContainerHostConfigFromJob(job)
-		// Validate the HostConfig binds: make sure each bind source
-		// exists on the host, creating it if necessary.
-		for _, bind := range hostConfig.Binds {
-			splitBind := strings.Split(bind, ":")
-			source := splitBind[0]
-
-			// ensure the source exists on the host
-			_, err := os.Stat(source)
-			if err != nil && os.IsNotExist(err) {
-				err = os.MkdirAll(source, 0755)
-				if err != nil {
-					return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
-				}
-			}
-		}
-		// Register any links from the host config before starting the container
-		if err := srv.daemon.RegisterLinks(container, hostConfig); err != nil {
-			return job.Error(err)
-		}
-		container.SetHostConfig(hostConfig)
-		container.ToDisk()
-	}
-	if err := container.Start(); err != nil {
-		return job.Errorf("Cannot start container %s: %s", name, err)
-	}
-	srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
-
-	return engine.StatusOK
-}
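-
-// The binds validated above are "src:dst" strings ("src:dst:mode" with ro/rw
-// also exists; an assumption here, since only src is inspected). A missing
-// host-side source is created with MkdirAll, so a bind like "/data/app:/app"
-// brings /data/app into existence on first start.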
-
-func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		t    = 10
-	)
-	if job.EnvExists("t") {
-		t = job.GetenvInt("t")
-	}
-	if container := srv.daemon.Get(name); container != nil {
-		if !container.State.IsRunning() {
-			return job.Errorf("Container already stopped")
-		}
-		if err := container.Stop(int(t)); err != nil {
-			return job.Errorf("Cannot stop container %s: %s\n", name, err)
-		}
-		srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
-	} else {
-		return job.Errorf("No such container: %s\n", name)
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s", job.Name)
-	}
-	name := job.Args[0]
-	if container := srv.daemon.Get(name); container != nil {
-		status, _ := container.State.WaitStop(-1 * time.Second)
-		job.Printf("%d\n", status)
-		return engine.StatusOK
-	}
-	return job.Errorf("%s: no such container: %s", job.Name, name)
-}
-
-func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
-	if len(job.Args) != 3 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
-	}
-	name := job.Args[0]
-	height, err := strconv.Atoi(job.Args[1])
-	if err != nil {
-		return job.Error(err)
-	}
-	width, err := strconv.Atoi(job.Args[2])
-	if err != nil {
-		return job.Error(err)
-	}
-	if container := srv.daemon.Get(name); container != nil {
-		if err := container.Resize(height, width); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	}
-	return job.Errorf("No such container: %s", name)
-}
-
-func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-
-	var (
-		name   = job.Args[0]
-		stdout = job.GetenvBool("stdout")
-		stderr = job.GetenvBool("stderr")
-		tail   = job.Getenv("tail")
-		follow = job.GetenvBool("follow")
-		times  = job.GetenvBool("timestamps")
-		lines  = -1
-		format string
-	)
-	if !(stdout || stderr) {
-		return job.Errorf("You must choose at least one stream")
-	}
-	if times {
-		format = time.StampMilli
-	}
-	if tail == "" {
-		tail = "all"
-	}
-	container := srv.daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-	cLog, err := container.ReadLog("json")
-	if err != nil && os.IsNotExist(err) {
-		// Legacy logs
-		utils.Debugf("Old logs format")
-		if stdout {
-			cLog, err := container.ReadLog("stdout")
-			if err != nil {
-				utils.Errorf("Error reading logs (stdout): %s", err)
-			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
-				utils.Errorf("Error streaming logs (stdout): %s", err)
-			}
-		}
-		if stderr {
-			cLog, err := container.ReadLog("stderr")
-			if err != nil {
-				utils.Errorf("Error reading logs (stderr): %s", err)
-			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
-				utils.Errorf("Error streaming logs (stderr): %s", err)
-			}
-		}
-	} else if err != nil {
-		utils.Errorf("Error reading logs (json): %s", err)
-	} else {
-		if tail != "all" {
-			var err error
-			lines, err = strconv.Atoi(tail)
-			if err != nil {
-				utils.Errorf("Failed to parse tail %s, error: %v, show all logs", err)
-				lines = -1
-			}
-		}
-		if lines != 0 {
-			if lines > 0 {
-				f := cLog.(*os.File)
-				ls, err := tailfile.TailFile(f, lines)
-				if err != nil {
-					return job.Error(err)
-				}
-				tmp := bytes.NewBuffer([]byte{})
-				for _, l := range ls {
-					fmt.Fprintf(tmp, "%s\n", l)
-				}
-				cLog = tmp
-			}
-			dec := json.NewDecoder(cLog)
-			for {
-				l := &utils.JSONLog{}
-
-				if err := dec.Decode(l); err == io.EOF {
-					break
-				} else if err != nil {
-					utils.Errorf("Error streaming logs: %s", err)
-					break
-				}
-				logLine := l.Log
-				if times {
-					logLine = fmt.Sprintf("[%s] %s", l.Created.Format(format), logLine)
-				}
-				if l.Stream == "stdout" && stdout {
-					fmt.Fprintf(job.Stdout, "%s", logLine)
-				}
-				if l.Stream == "stderr" && stderr {
-					fmt.Fprintf(job.Stderr, "%s", logLine)
-				}
-			}
-		}
-	}
-	if follow {
-		errors := make(chan error, 2)
-		if stdout {
-			stdoutPipe := container.StdoutLogPipe()
-			go func() {
-				errors <- utils.WriteLog(stdoutPipe, job.Stdout, format)
-			}()
-		}
-		if stderr {
-			stderrPipe := container.StderrLogPipe()
-			go func() {
-				errors <- utils.WriteLog(stderrPipe, job.Stderr, format)
-			}()
-		}
-		err := <-errors
-		if err != nil {
-			utils.Errorf("%s", err)
-		}
-	}
-	return engine.StatusOK
-}
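-
-// Sketch of one line in the "json" log file decoded above, assuming the
-// conventional field names on utils.JSONLog (log, stream, time); the exact
-// tags are an assumption, not pinned down in this file:
-//
-//	{"log":"hello world\n","stream":"stdout","time":"2014-08-01T12:00:00.000000000Z"}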
-
-func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-
-	var (
-		name   = job.Args[0]
-		logs   = job.GetenvBool("logs")
-		stream = job.GetenvBool("stream")
-		stdin  = job.GetenvBool("stdin")
-		stdout = job.GetenvBool("stdout")
-		stderr = job.GetenvBool("stderr")
-	)
-
-	container := srv.daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
-	}
-
-	//logs
-	if logs {
-		cLog, err := container.ReadLog("json")
-		if err != nil && os.IsNotExist(err) {
-			// Legacy logs
-			utils.Debugf("Old logs format")
-			if stdout {
-				cLog, err := container.ReadLog("stdout")
-				if err != nil {
-					utils.Errorf("Error reading logs (stdout): %s", err)
-				} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
-					utils.Errorf("Error streaming logs (stdout): %s", err)
-				}
-			}
-			if stderr {
-				cLog, err := container.ReadLog("stderr")
-				if err != nil {
-					utils.Errorf("Error reading logs (stderr): %s", err)
-				} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
-					utils.Errorf("Error streaming logs (stderr): %s", err)
-				}
-			}
-		} else if err != nil {
-			utils.Errorf("Error reading logs (json): %s", err)
-		} else {
-			dec := json.NewDecoder(cLog)
-			for {
-				l := &utils.JSONLog{}
-
-				if err := dec.Decode(l); err == io.EOF {
-					break
-				} else if err != nil {
-					utils.Errorf("Error streaming logs: %s", err)
-					break
-				}
-				if l.Stream == "stdout" && stdout {
-					fmt.Fprintf(job.Stdout, "%s", l.Log)
-				}
-				if l.Stream == "stderr" && stderr {
-					fmt.Fprintf(job.Stderr, "%s", l.Log)
-				}
-			}
-		}
-	}
-
-	//stream
-	if stream {
-		var (
-			cStdin           io.ReadCloser
-			cStdout, cStderr io.Writer
-			cStdinCloser     io.Closer
-		)
-
-		if stdin {
-			r, w := io.Pipe()
-			go func() {
-				defer w.Close()
-				defer utils.Debugf("Closing buffered stdin pipe")
-				io.Copy(w, job.Stdin)
-			}()
-			cStdin = r
-			cStdinCloser = job.Stdin
-		}
-		if stdout {
-			cStdout = job.Stdout
-		}
-		if stderr {
-			cStderr = job.Stderr
-		}
-
-		<-srv.daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
-
-		// If we are in stdinonce mode, wait for the process to end
-		// otherwise, simply return
-		if container.Config.StdinOnce && !container.Config.Tty {
-			container.State.WaitStop(-1 * time.Second)
-		}
-	}
-	return engine.StatusOK
-}
-
-func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
-	if len(job.Args) != 2 {
-		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
-	}
-
-	var (
-		name     = job.Args[0]
-		resource = job.Args[1]
-	)
-
-	if container := srv.daemon.Get(name); container != nil {
-
-		data, err := container.Copy(resource)
-		if err != nil {
-			return job.Error(err)
-		}
-		defer data.Close()
-
-		if _, err := io.Copy(job.Stdout, data); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	}
-	return job.Errorf("No such container: %s", name)
-}
-
-func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
-	daemon, err := daemon.NewDaemon(config, eng)
-	if err != nil {
-		return nil, err
-	}
-	srv := &Server{
-		Eng:            eng,
-		daemon:         daemon,
-		pullingPool:    make(map[string]chan struct{}),
-		pushingPool:    make(map[string]chan struct{}),
-		events:         make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
-		eventPublisher: utils.NewJSONMessagePublisher(),
-	}
-	daemon.SetServer(srv)
-	return srv, nil
-}
-
-func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
-	now := time.Now().UTC().Unix()
-	jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
-	srv.AddEvent(jm)
-	srv.eventPublisher.Publish(jm)
-	return &jm
-}
-
-func (srv *Server) AddEvent(jm utils.JSONMessage) {
-	srv.Lock()
-	defer srv.Unlock()
-	srv.events = append(srv.events, jm)
-}
-
-func (srv *Server) GetEvents() []utils.JSONMessage {
-	srv.RLock()
-	defer srv.RUnlock()
-	return srv.events
-}
-
-func (srv *Server) SetRunning(status bool) {
-	srv.Lock()
-	defer srv.Unlock()
-
-	srv.running = status
-}
-
-func (srv *Server) IsRunning() bool {
-	srv.RLock()
-	defer srv.RUnlock()
-	return srv.running
-}
-
-func (srv *Server) Close() error {
-	if srv == nil {
-		return nil
-	}
-	srv.SetRunning(false)
-	done := make(chan struct{})
-	go func() {
-		srv.tasks.Wait()
-		close(done)
-	}()
-	select {
-	// Waiting server jobs for 15 seconds, shutdown immediately after that time
-	case <-time.After(time.Second * 15):
-	case <-done:
-	}
-	if srv.daemon == nil {
-		return nil
-	}
-	return srv.daemon.Close()
-}
-
-type Server struct {
-	sync.RWMutex
-	daemon         *daemon.Daemon
-	pullingPool    map[string]chan struct{}
-	pushingPool    map[string]chan struct{}
-	events         []utils.JSONMessage
-	eventPublisher *utils.JSONMessagePublisher
-	Eng            *engine.Engine
-	running        bool
-	tasks          sync.WaitGroup
-}
diff --git a/server/server_unit_test.go b/server/server_unit_test.go
deleted file mode 100644
index e6c5d49..0000000
--- a/server/server_unit_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package server
-
-import (
-	"testing"
-	"time"
-
-	"github.com/dotcloud/docker/utils"
-)
-
-func TestPools(t *testing.T) {
-	srv := &Server{
-		pullingPool: make(map[string]chan struct{}),
-		pushingPool: make(map[string]chan struct{}),
-	}
-
-	if _, err := srv.poolAdd("pull", "test1"); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := srv.poolAdd("pull", "test2"); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := srv.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
-		t.Fatalf("Expected `pull test1 is already in progress`")
-	}
-	if _, err := srv.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
-		t.Fatalf("Expected `pull test1 is already in progress`")
-	}
-	if _, err := srv.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
-		t.Fatalf("Expected `Unknown pool type`")
-	}
-	if err := srv.poolRemove("pull", "test2"); err != nil {
-		t.Fatal(err)
-	}
-	if err := srv.poolRemove("pull", "test2"); err != nil {
-		t.Fatal(err)
-	}
-	if err := srv.poolRemove("pull", "test1"); err != nil {
-		t.Fatal(err)
-	}
-	if err := srv.poolRemove("push", "test1"); err != nil {
-		t.Fatal(err)
-	}
-	if err := srv.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
-		t.Fatalf("Expected `Unknown pool type`")
-	}
-}
-
-func TestLogEvent(t *testing.T) {
-	srv := &Server{
-		events:         make([]utils.JSONMessage, 0, 64),
-		eventPublisher: utils.NewJSONMessagePublisher(),
-	}
-
-	srv.LogEvent("fakeaction", "fakeid", "fakeimage")
-
-	listener := make(chan utils.JSONMessage)
-	srv.eventPublisher.Subscribe(listener)
-
-	srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
-
-	numEvents := len(srv.GetEvents())
-	if numEvents != 2 {
-		t.Fatalf("Expected 2 events, found %d", numEvents)
-	}
-	go func() {
-		time.Sleep(200 * time.Millisecond)
-		srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
-		time.Sleep(200 * time.Millisecond)
-		srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
-	}()
-
-	setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
-		for i := 2; i < 4; i++ {
-			event := <-listener
-			if event != srv.GetEvents()[i] {
-				t.Fatalf("Event received it different than expected")
-			}
-		}
-	})
-}
-
-// FIXME: this is duplicated from integration/commands_test.go
-func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
-	c := make(chan bool)
-
-	// Make sure we are not too long
-	go func() {
-		time.Sleep(d)
-		c <- true
-	}()
-	go func() {
-		f()
-		c <- false
-	}()
-	if <-c && msg != "" {
-		t.Fatal(msg)
-	}
-}
diff --git a/sysinit/README.md b/sysinit/README.md
deleted file mode 100644
index c28d029..0000000
--- a/sysinit/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Sys Init code
-
-This code is run INSIDE the container and is responsible for setting
-up the environment before running the actual process
diff --git a/sysinit/sysinit.go b/sysinit/sysinit.go
deleted file mode 100644
index 62e89ce..0000000
--- a/sysinit/sysinit.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package sysinit
-
-import (
-	"flag"
-	"fmt"
-	"github.com/dotcloud/docker/daemon/execdriver"
-	_ "github.com/dotcloud/docker/daemon/execdriver/lxc"
-	_ "github.com/dotcloud/docker/daemon/execdriver/native"
-	"log"
-	"os"
-)
-
-func executeProgram(args *execdriver.InitArgs) error {
-	dockerInitFct, err := execdriver.GetInitFunc(args.Driver)
-	if err != nil {
-		panic(err)
-	}
-	return dockerInitFct(args)
-}
-
-// Sys Init code
-// This code is run INSIDE the container and is responsible for setting
-// up the environment before running the actual process
-func SysInit() {
-	if len(os.Args) <= 1 {
-		fmt.Println("You should not invoke dockerinit manually")
-		os.Exit(1)
-	}
-
-	var (
-		// Get cmdline arguments
-		user       = flag.String("u", "", "username or uid")
-		gateway    = flag.String("g", "", "gateway address")
-		ip         = flag.String("i", "", "ip address")
-		workDir    = flag.String("w", "", "workdir")
-		privileged = flag.Bool("privileged", false, "privileged mode")
-		mtu        = flag.Int("mtu", 1500, "interface mtu")
-		driver     = flag.String("driver", "", "exec driver")
-		pipe       = flag.Int("pipe", 0, "sync pipe fd")
-		console    = flag.String("console", "", "console (pty slave) path")
-		root       = flag.String("root", ".", "root path for configuration files")
-	)
-	flag.Parse()
-
-	args := &execdriver.InitArgs{
-		User:       *user,
-		Gateway:    *gateway,
-		Ip:         *ip,
-		WorkDir:    *workDir,
-		Privileged: *privileged,
-		Args:       flag.Args(),
-		Mtu:        *mtu,
-		Driver:     *driver,
-		Console:    *console,
-		Pipe:       *pipe,
-		Root:       *root,
-	}
-
-	if err := executeProgram(args); err != nil {
-		log.Fatal(err)
-	}
-}
diff --git a/utils/checksum.go b/utils/checksum.go
deleted file mode 100644
index 1c85aa6..0000000
--- a/utils/checksum.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package utils
-
-import (
-	"encoding/hex"
-	"hash"
-	"io"
-)
-
-type CheckSum struct {
-	io.Reader
-	Hash hash.Hash
-}
-
-func (cs *CheckSum) Read(buf []byte) (int, error) {
-	n, err := cs.Reader.Read(buf)
-	if err == nil {
-		cs.Hash.Write(buf[:n])
-	}
-	return n, err
-}
-
-func (cs *CheckSum) Sum() string {
-	return hex.EncodeToString(cs.Hash.Sum(nil))
-}
diff --git a/utils/http.go b/utils/http.go
index e193633..c877eef 100644
--- a/utils/http.go
+++ b/utils/http.go
@@ -4,6 +4,8 @@
 	"io"
 	"net/http"
 	"strings"
+
+	"github.com/docker/docker/pkg/log"
 )
 
 // VersionInfo is used to model entities which has a version.
@@ -157,6 +159,6 @@
 			return nil, err
 		}
 	}
-	Debugf("%v -- HEADERS: %v", req.URL, req.Header)
+	log.Debugf("%v -- HEADERS: %v", req.URL, req.Header)
 	return req, err
 }
diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go
index 66fdcae..e22d06e 100644
--- a/utils/jsonmessage.go
+++ b/utils/jsonmessage.go
@@ -7,8 +7,8 @@
 	"strings"
 	"time"
 
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/pkg/units"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/pkg/units"
 )
 
 type JSONError struct {
@@ -50,7 +50,12 @@
 	total := units.HumanSize(int64(p.Total))
 	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
 	if width > 110 {
-		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", 50-percentage))
+		// this number can't be negative gh#7136
+		numSpaces := 0
+		if 50-percentage > 0 {
+			numSpaces = 50 - percentage
+		}
+		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
 	}
 	numbersBox = fmt.Sprintf("%8v/%v", current, total)
 
@@ -95,7 +100,7 @@
 		return nil
 	}
 	if jm.Time != 0 {
-		fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0))
+		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(time.RFC3339Nano))
 	}
 	if jm.ID != "" {
 		fmt.Fprintf(out, "%s: ", jm.ID)
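
The clamp introduced above keeps `strings.Repeat` from receiving a negative count when a layer reports `Current` greater than `Total` (gh#7136). A minimal standalone sketch of the same guard, using only the standard library; the 50-column bar width is copied from the hunk above:

```go
package main

import (
	"fmt"
	"strings"
)

// progressBar mirrors the clamp above: with current > total the computed
// percentage exceeds 50, so the space count is floored at zero before it
// reaches strings.Repeat, which panics on negative counts.
func progressBar(current, total int) string {
	percentage := int(float64(current)/float64(total)*100) / 2
	numSpaces := 0
	if 50-percentage > 0 {
		numSpaces = 50 - percentage
	}
	return fmt.Sprintf("[%s>%s]", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
}

func main() {
	fmt.Println(progressBar(50, 100)) // half-full bar
	fmt.Println(progressBar(50, 40))  // overshoot: overfull bar, but no panic
}
```
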
diff --git a/utils/jsonmessage_test.go b/utils/jsonmessage_test.go
index ecf1896..0ce9492 100644
--- a/utils/jsonmessage_test.go
+++ b/utils/jsonmessage_test.go
@@ -17,13 +17,22 @@
 		t.Fatalf("Expected empty string, got '%s'", jp.String())
 	}
 
+	expected := "     1 B"
 	jp2 := JSONProgress{Current: 1}
-	if jp2.String() != "     1 B" {
-		t.Fatalf("Expected '     1 B', got '%s'", jp2.String())
+	if jp2.String() != expected {
+		t.Fatalf("Expected %q, got %q", expected, jp2.String())
 	}
 
+	expected = "[=========================>                         ]     50 B/100 B"
 	jp3 := JSONProgress{Current: 50, Total: 100}
-	if jp3.String() != "[=========================>                         ]     50 B/100 B" {
-		t.Fatalf("Expected '[=========================>                         ]     50 B/100 B', got '%s'", jp3.String())
+	if jp3.String() != expected {
+		t.Fatalf("Expected %q, got %q", expected, jp3.String())
+	}
+
+	// this number can't be negative gh#7136
+	expected = "[==============================================================>]     50 B/40 B"
+	jp4 := JSONProgress{Current: 50, Total: 40}
+	if jp4.String() != expected {
+		t.Fatalf("Expected %q, got %q", expected, jp4.String())
 	}
 }
diff --git a/utils/jsonmessagepublisher.go b/utils/jsonmessagepublisher.go
deleted file mode 100644
index 659e6c8..0000000
--- a/utils/jsonmessagepublisher.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package utils
-
-import (
-	"sync"
-	"time"
-)
-
-func NewJSONMessagePublisher() *JSONMessagePublisher {
-	return &JSONMessagePublisher{}
-}
-
-type JSONMessageListener chan<- JSONMessage
-
-type JSONMessagePublisher struct {
-	m           sync.RWMutex
-	subscribers []JSONMessageListener
-}
-
-func (p *JSONMessagePublisher) Subscribe(l JSONMessageListener) {
-	p.m.Lock()
-	p.subscribers = append(p.subscribers, l)
-	p.m.Unlock()
-}
-
-func (p *JSONMessagePublisher) SubscribersCount() int {
-	p.m.RLock()
-	count := len(p.subscribers)
-	p.m.RUnlock()
-	return count
-}
-
-// Unsubscribe closes and removes the specified listener from the list of
-// previously registed ones.
-// It returns a boolean value indicating if the listener was successfully
-// found, closed and unregistered.
-func (p *JSONMessagePublisher) Unsubscribe(l JSONMessageListener) bool {
-	p.m.Lock()
-	defer p.m.Unlock()
-
-	for i, subscriber := range p.subscribers {
-		if subscriber == l {
-			close(l)
-			p.subscribers = append(p.subscribers[:i], p.subscribers[i+1:]...)
-			return true
-		}
-	}
-	return false
-}
-
-func (p *JSONMessagePublisher) Publish(m JSONMessage) {
-	p.m.RLock()
-	for _, subscriber := range p.subscribers {
-		// We give each subscriber a 100ms time window to receive the event,
-		// after which we move to the next.
-		select {
-		case subscriber <- m:
-		case <-time.After(100 * time.Millisecond):
-		}
-	}
-	p.m.RUnlock()
-}
diff --git a/utils/jsonmessagepublisher_test.go b/utils/jsonmessagepublisher_test.go
deleted file mode 100644
index 2e1a820..0000000
--- a/utils/jsonmessagepublisher_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package utils
-
-import (
-	"testing"
-	"time"
-)
-
-func assertSubscribersCount(t *testing.T, q *JSONMessagePublisher, expected int) {
-	if q.SubscribersCount() != expected {
-		t.Fatalf("Expected %d registered subscribers, got %d", expected, q.SubscribersCount())
-	}
-}
-
-func TestJSONMessagePublisherSubscription(t *testing.T) {
-	q := NewJSONMessagePublisher()
-	l1 := make(chan JSONMessage)
-	l2 := make(chan JSONMessage)
-
-	assertSubscribersCount(t, q, 0)
-	q.Subscribe(l1)
-	assertSubscribersCount(t, q, 1)
-	q.Subscribe(l2)
-	assertSubscribersCount(t, q, 2)
-
-	q.Unsubscribe(l1)
-	q.Unsubscribe(l2)
-	assertSubscribersCount(t, q, 0)
-}
-
-func TestJSONMessagePublisherPublish(t *testing.T) {
-	q := NewJSONMessagePublisher()
-	l1 := make(chan JSONMessage)
-	l2 := make(chan JSONMessage)
-
-	go func() {
-		for {
-			select {
-			case <-l1:
-				close(l1)
-				l1 = nil
-			case <-l2:
-				close(l2)
-				l2 = nil
-			case <-time.After(1 * time.Second):
-				q.Unsubscribe(l1)
-				q.Unsubscribe(l2)
-				t.Fatal("Timeout waiting for broadcasted message")
-			}
-		}
-	}()
-
-	q.Subscribe(l1)
-	q.Subscribe(l2)
-	q.Publish(JSONMessage{})
-}
-
-func TestJSONMessagePublishTimeout(t *testing.T) {
-	q := NewJSONMessagePublisher()
-	l := make(chan JSONMessage)
-	q.Subscribe(l)
-
-	c := make(chan struct{})
-	go func() {
-		q.Publish(JSONMessage{})
-		close(c)
-	}()
-
-	select {
-	case <-c:
-	case <-time.After(time.Second):
-		t.Fatal("Timeout publishing message")
-	}
-}
diff --git a/utils/progressreader.go b/utils/progressreader.go
index a43ee55..87eae8b 100644
--- a/utils/progressreader.go
+++ b/utils/progressreader.go
@@ -32,7 +32,7 @@
 		r.lastUpdate = r.progress.Current
 	}
 	// Send newline when complete
-	if r.newLine && err != nil {
+	if r.newLine && err != nil && read == 0 {
 		r.output.Write(r.sf.FormatStatus("", ""))
 	}
 	return read, err
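
The added `read == 0` condition accounts for the `io.Reader` contract, which allows a reader to return data and an error in the same call; the completion newline should only be emitted once the stream is fully drained. A small sketch demonstrating that behavior with the standard library's `testing/iotest` helper:

```go
package main

import (
	"fmt"
	"strings"
	"testing/iotest"
)

func main() {
	// DataErrReader returns the final chunk of data together with io.EOF
	// in a single Read call, which is legal under the io.Reader contract.
	r := iotest.DataErrReader(strings.NewReader("abc"))
	buf := make([]byte, 8)
	n, err := r.Read(buf)
	// Prints "read 3 bytes, err=EOF": gating the completion newline on
	// err != nil && n == 0 (as in the hunk above) avoids emitting it while
	// data is still being delivered.
	fmt.Printf("read %d bytes, err=%v\n", n, err)
}
```
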
diff --git a/utils/stdcopy.go b/utils/stdcopy.go
index bb9d632..08263de 100644
--- a/utils/stdcopy.go
+++ b/utils/stdcopy.go
@@ -4,6 +4,8 @@
 	"encoding/binary"
 	"errors"
 	"io"
+
+	"github.com/docker/docker/pkg/log"
 )
 
 const (
@@ -85,13 +87,13 @@
 			nr += nr2
 			if er == io.EOF {
 				if nr < StdWriterPrefixLen {
-					Debugf("Corrupted prefix: %v", buf[:nr])
+					log.Debugf("Corrupted prefix: %v", buf[:nr])
 					return written, nil
 				}
 				break
 			}
 			if er != nil {
-				Debugf("Error reading header: %s", er)
+				log.Debugf("Error reading header: %s", er)
 				return 0, er
 			}
 		}
@@ -107,18 +109,18 @@
 			// Write on stderr
 			out = dsterr
 		default:
-			Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
+			log.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
 			return 0, ErrInvalidStdHeader
 		}
 
 		// Retrieve the size of the frame
 		frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
-		Debugf("framesize: %d", frameSize)
+		log.Debugf("framesize: %d", frameSize)
 
 		// Check if the buffer is big enough to read the frame.
 		// Extend it if necessary.
 		if frameSize+StdWriterPrefixLen > bufLen {
-			Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
+			log.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
 			buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
 			bufLen = len(buf)
 		}
@@ -130,13 +132,13 @@
 			nr += nr2
 			if er == io.EOF {
 				if nr < frameSize+StdWriterPrefixLen {
-					Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
+					log.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
 					return written, nil
 				}
 				break
 			}
 			if er != nil {
-				Debugf("Error reading frame: %s", er)
+				log.Debugf("Error reading frame: %s", er)
 				return 0, er
 			}
 		}
@@ -144,12 +146,12 @@
 		// Write the retrieved frame (without header)
 		nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
 		if ew != nil {
-			Debugf("Error writing frame: %s", ew)
+			log.Debugf("Error writing frame: %s", ew)
 			return 0, ew
 		}
 		// If the frame has not been fully written: error
 		if nw != frameSize {
-			Debugf("Error Short Write: (%d on %d)", nw, frameSize)
+			log.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
 			return 0, io.ErrShortWrite
 		}
 		written += int64(nw)
diff --git a/utils/tmpdir.go b/utils/tmpdir.go
new file mode 100644
index 0000000..921a8f6
--- /dev/null
+++ b/utils/tmpdir.go
@@ -0,0 +1,12 @@
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
+
+package utils
+
+import (
+	"os"
+)
+
+// TempDir returns the default directory to use for temporary files.
+func TempDir(rootdir string) (string, error) {
+	return os.TempDir(), nil
+}
diff --git a/utils/tmpdir_unix.go b/utils/tmpdir_unix.go
new file mode 100644
index 0000000..30d7c3a
--- /dev/null
+++ b/utils/tmpdir_unix.go
@@ -0,0 +1,18 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package utils
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// TempDir returns the default directory to use for temporary files.
+func TempDir(rootDir string) (string, error) {
+	var tmpDir string
+	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
+		tmpDir = filepath.Join(rootDir, "tmp")
+	}
+	err := os.MkdirAll(tmpDir, 0700)
+	return tmpDir, err
+}
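
A rough usage sketch of the unix variant above, assuming nothing beyond the standard library: `DOCKER_TMPDIR` wins when set, otherwise a `tmp` directory is created under the daemon root with owner-only permissions. The `/var/lib/docker` root below is just an illustrative value:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// tempDir mirrors the unix variant above: DOCKER_TMPDIR wins when set,
// otherwise a "tmp" directory is created under the daemon root, readable
// only by the owner.
func tempDir(rootDir string) (string, error) {
	tmpDir := os.Getenv("DOCKER_TMPDIR")
	if tmpDir == "" {
		tmpDir = filepath.Join(rootDir, "tmp")
	}
	return tmpDir, os.MkdirAll(tmpDir, 0700)
}

func main() {
	// "/var/lib/docker" stands in for the daemon's configured root here.
	dir, err := tempDir("/var/lib/docker")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scratch space:", dir)
}
```
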
diff --git a/utils/utils.go b/utils/utils.go
index 333468d..da6854b 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -6,8 +6,6 @@
 	"crypto/sha1"
 	"crypto/sha256"
 	"encoding/hex"
-	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -20,9 +18,9 @@
 	"strings"
 	"sync"
 	"syscall"
-	"time"
 
-	"github.com/dotcloud/docker/dockerversion"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/log"
 )
 
 type KeyValuePair struct {
@@ -30,12 +28,6 @@
 	Value string
 }
 
-// A common interface to access the Fatal method of
-// both testing.B and testing.T.
-type Fataler interface {
-	Fatal(args ...interface{})
-}
-
 // Go is a basic promise implementation: it wraps calls a function in a goroutine,
 // and returns a channel which will later return the function's return value.
 func Go(f func() error) chan error {
@@ -57,31 +49,6 @@
 	return resp, nil
 }
 
-func logf(level string, format string, a ...interface{}) {
-	// Retrieve the stack infos
-	_, file, line, ok := runtime.Caller(2)
-	if !ok {
-		file = "<unknown>"
-		line = -1
-	} else {
-		file = file[strings.LastIndex(file, "/")+1:]
-	}
-
-	fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...)
-}
-
-// Debug function, if the debug flag is set, then display. Do nothing otherwise
-// If Docker is in damon mode, also send the debug info on the socket
-func Debugf(format string, a ...interface{}) {
-	if os.Getenv("DEBUG") != "" {
-		logf("debug", format, a...)
-	}
-}
-
-func Errorf(format string, a ...interface{}) {
-	logf("error", format, a...)
-}
-
 func Trunc(s string, maxlen int) string {
 	if len(s) <= maxlen {
 		return s
@@ -265,131 +232,9 @@
 	return closer.Close()
 }
 
-type WriteBroadcaster struct {
-	sync.Mutex
-	buf     *bytes.Buffer
-	streams map[string](map[io.WriteCloser]struct{})
-}
-
-func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) {
-	w.Lock()
-	if _, ok := w.streams[stream]; !ok {
-		w.streams[stream] = make(map[io.WriteCloser]struct{})
-	}
-	w.streams[stream][writer] = struct{}{}
-	w.Unlock()
-}
-
-type JSONLog struct {
-	Log     string    `json:"log,omitempty"`
-	Stream  string    `json:"stream,omitempty"`
-	Created time.Time `json:"time"`
-}
-
-func (jl *JSONLog) Format(format string) (string, error) {
-	if format == "" {
-		return jl.Log, nil
-	}
-	if format == "json" {
-		m, err := json.Marshal(jl)
-		return string(m), err
-	}
-	return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil
-}
-
-func WriteLog(src io.Reader, dst io.WriteCloser, format string) error {
-	dec := json.NewDecoder(src)
-	for {
-		l := &JSONLog{}
-
-		if err := dec.Decode(l); err == io.EOF {
-			return nil
-		} else if err != nil {
-			Errorf("Error streaming logs: %s", err)
-			return err
-		}
-		line, err := l.Format(format)
-		if err != nil {
-			return err
-		}
-		fmt.Fprintf(dst, "%s", line)
-	}
-}
-
-type LogFormatter struct {
-	wc         io.WriteCloser
-	timeFormat string
-}
-
-func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
-	created := time.Now().UTC()
-	w.Lock()
-	defer w.Unlock()
-	if writers, ok := w.streams[""]; ok {
-		for sw := range writers {
-			if n, err := sw.Write(p); err != nil || n != len(p) {
-				// On error, evict the writer
-				delete(writers, sw)
-			}
-		}
-	}
-	w.buf.Write(p)
-	lines := []string{}
-	for {
-		line, err := w.buf.ReadString('\n')
-		if err != nil {
-			w.buf.Write([]byte(line))
-			break
-		}
-		lines = append(lines, line)
-	}
-
-	if len(lines) != 0 {
-		for stream, writers := range w.streams {
-			if stream == "" {
-				continue
-			}
-			var lp []byte
-			for _, line := range lines {
-				b, err := json.Marshal(&JSONLog{Log: line, Stream: stream, Created: created})
-				if err != nil {
-					Errorf("Error making JSON log line: %s", err)
-				}
-				lp = append(lp, b...)
-				lp = append(lp, '\n')
-			}
-			for sw := range writers {
-				if _, err := sw.Write(lp); err != nil {
-					delete(writers, sw)
-				}
-			}
-		}
-	}
-	return len(p), nil
-}
-
-func (w *WriteBroadcaster) CloseWriters() error {
-	w.Lock()
-	defer w.Unlock()
-	for _, writers := range w.streams {
-		for w := range writers {
-			w.Close()
-		}
-	}
-	w.streams = make(map[string](map[io.WriteCloser]struct{}))
-	return nil
-}
-
-func NewWriteBroadcaster() *WriteBroadcaster {
-	return &WriteBroadcaster{
-		streams: make(map[string](map[io.WriteCloser]struct{})),
-		buf:     bytes.NewBuffer(nil),
-	}
-}
-
 func GetTotalUsedFds() int {
 	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
-		Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+		log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
 	} else {
 		return len(fds)
 	}
@@ -487,92 +332,6 @@
 	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
 }
 
-type KernelVersionInfo struct {
-	Kernel int
-	Major  int
-	Minor  int
-	Flavor string
-}
-
-func (k *KernelVersionInfo) String() string {
-	return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
-}
-
-// Compare two KernelVersionInfo struct.
-// Returns -1 if a < b, 0 if a == b, 1 it a > b
-func CompareKernelVersion(a, b *KernelVersionInfo) int {
-	if a.Kernel < b.Kernel {
-		return -1
-	} else if a.Kernel > b.Kernel {
-		return 1
-	}
-
-	if a.Major < b.Major {
-		return -1
-	} else if a.Major > b.Major {
-		return 1
-	}
-
-	if a.Minor < b.Minor {
-		return -1
-	} else if a.Minor > b.Minor {
-		return 1
-	}
-
-	return 0
-}
-
-func GetKernelVersion() (*KernelVersionInfo, error) {
-	var (
-		err error
-	)
-
-	uts, err := uname()
-	if err != nil {
-		return nil, err
-	}
-
-	release := make([]byte, len(uts.Release))
-
-	i := 0
-	for _, c := range uts.Release {
-		release[i] = byte(c)
-		i++
-	}
-
-	// Remove the \x00 from the release for Atoi to parse correctly
-	release = release[:bytes.IndexByte(release, 0)]
-
-	return ParseRelease(string(release))
-}
-
-func ParseRelease(release string) (*KernelVersionInfo, error) {
-	var (
-		kernel, major, minor, parsed int
-		flavor, partial              string
-	)
-
-	// Ignore error from Sscanf to allow an empty flavor.  Instead, just
-	// make sure we got all the version numbers.
-	parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
-	if parsed < 2 {
-		return nil, errors.New("Can't parse kernel version " + release)
-	}
-
-	// sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
-	parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
-	if parsed < 1 {
-		flavor = partial
-	}
-
-	return &KernelVersionInfo{
-		Kernel: kernel,
-		Major:  major,
-		Minor:  minor,
-		Flavor: flavor,
-	}, nil
-}
-
 // FIXME: this is deprecated by CopyWithTar in archive.go
 func CopyDirectory(source, dest string) error {
 	if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
@@ -666,80 +425,6 @@
 	return output
 }
 
-// FIXME: Change this not to receive default value as parameter
-func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) {
-	var (
-		proto string
-		host  string
-		port  int
-	)
-	addr = strings.TrimSpace(addr)
-	switch {
-	case addr == "tcp://":
-		return "", fmt.Errorf("Invalid bind address format: %s", addr)
-	case strings.HasPrefix(addr, "unix://"):
-		proto = "unix"
-		addr = strings.TrimPrefix(addr, "unix://")
-		if addr == "" {
-			addr = defaultUnix
-		}
-	case strings.HasPrefix(addr, "tcp://"):
-		proto = "tcp"
-		addr = strings.TrimPrefix(addr, "tcp://")
-	case strings.HasPrefix(addr, "fd://"):
-		return addr, nil
-	case addr == "":
-		proto = "unix"
-		addr = defaultUnix
-	default:
-		if strings.Contains(addr, "://") {
-			return "", fmt.Errorf("Invalid bind address protocol: %s", addr)
-		}
-		proto = "tcp"
-	}
-
-	if proto != "unix" && strings.Contains(addr, ":") {
-		hostParts := strings.Split(addr, ":")
-		if len(hostParts) != 2 {
-			return "", fmt.Errorf("Invalid bind address format: %s", addr)
-		}
-		if hostParts[0] != "" {
-			host = hostParts[0]
-		} else {
-			host = defaultHost
-		}
-
-		if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 {
-			port = p
-		} else {
-			return "", fmt.Errorf("Invalid bind address format: %s", addr)
-		}
-
-	} else if proto == "tcp" && !strings.Contains(addr, ":") {
-		return "", fmt.Errorf("Invalid bind address format: %s", addr)
-	} else {
-		host = addr
-	}
-	if proto == "unix" {
-		return fmt.Sprintf("%s://%s", proto, host), nil
-	}
-	return fmt.Sprintf("%s://%s:%d", proto, host, port), nil
-}
-
-// Get a repos name and returns the right reposName + tag
-// The tag can be confusing because of a port in a repository name.
-//     Ex: localhost.localdomain:5000/samalba/hipache:latest
-func ParseRepositoryTag(repos string) (string, string) {
-	n := strings.LastIndex(repos, ":")
-	if n < 0 {
-		return repos, ""
-	}
-	if tag := repos[n+1:]; !strings.Contains(tag, "/") {
-		return repos[:n], tag
-	}
-	return repos, ""
-}
-
 // An StatusError reports an unsuccessful exit by a command.
 type StatusError struct {
 	Status     string
@@ -785,27 +470,6 @@
 	return buf.String()
 }
 
-func PartParser(template, data string) (map[string]string, error) {
-	// ip:public:private
-	var (
-		templateParts = strings.Split(template, ":")
-		parts         = strings.Split(data, ":")
-		out           = make(map[string]string, len(templateParts))
-	)
-	if len(parts) != len(templateParts) {
-		return nil, fmt.Errorf("Invalid format to parse.  %s should match template %s", data, template)
-	}
-
-	for i, t := range templateParts {
-		value := ""
-		if len(parts) > i {
-			value = parts[i]
-		}
-		out[t] = value
-	}
-	return out, nil
-}
-
 var globalTestID string
 
 // TestDirectory creates a new temporary directory and returns its path.
@@ -919,14 +583,6 @@
 	return realPath, nil
 }
 
-func ParseKeyValueOpt(opt string) (string, string, error) {
-	parts := strings.SplitN(opt, "=", 2)
-	if len(parts) != 2 {
-		return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
-	}
-	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
-}
-
 // TreeSize walks a directory tree and returns its total size in bytes.
 func TreeSize(dir string) (size int64, err error) {
 	data := make(map[uint64]struct{})
@@ -960,16 +616,27 @@
 // ValidateContextDirectory checks if all the contents of the directory
 // can be read and returns an error if some files can't be read
 // symlinks which point to non-existing files don't trigger an error
-func ValidateContextDirectory(srcPath string) error {
+func ValidateContextDirectory(srcPath string, excludes []string) error {
 	var finalError error
 
 	filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
 		// skip this directory/file if it's not in the path, it won't get added to the context
-		_, err = filepath.Rel(srcPath, filePath)
+		relFilePath, err := filepath.Rel(srcPath, filePath)
 		if err != nil && os.IsPermission(err) {
 			return nil
 		}
 
+		skip, err := Matches(relFilePath, excludes)
+		if err != nil {
+			finalError = err
+		}
+		if skip {
+			if f.IsDir() {
+				return filepath.SkipDir
+			}
+			return nil
+		}
+
 		if _, err := os.Stat(filePath); err != nil && os.IsPermission(err) {
 			finalError = fmt.Errorf("can't stat '%s'", filePath)
 			return err
@@ -993,3 +660,32 @@
 	})
 	return finalError
 }
+
+func StringsContainsNoCase(slice []string, s string) bool {
+	for _, ss := range slice {
+		if strings.ToLower(s) == strings.ToLower(ss) {
+			return true
+		}
+	}
+	return false
+}
+
+// Matches returns true if relFilePath matches any of the patterns
+func Matches(relFilePath string, patterns []string) (bool, error) {
+	for _, exclude := range patterns {
+		matched, err := filepath.Match(exclude, relFilePath)
+		if err != nil {
+			log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
+			return false, err
+		}
+		if matched {
+			if filepath.Clean(relFilePath) == "." {
+				log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
+				continue
+			}
+			log.Debugf("Skipping excluded path: %s", relFilePath)
+			return true, nil
+		}
+	}
+	return false, nil
+}
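
`Matches` delegates to `filepath.Match` per pattern and refuses to exclude the context root itself. Note that `filepath.Match` does not cross path separators, so a bare directory pattern such as `build` matches only the directory entry, which the `Walk` caller above then prunes via `filepath.SkipDir`. A standalone sketch of those semantics; the patterns and paths are made up for illustration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical exclude list in the spirit of a .dockerignore file.
	patterns := []string{"*.tmp", "build", "."}

	for _, p := range []string{"notes.tmp", "build", "build/out.o", "src/main.go", "."} {
		excluded := false
		for _, pat := range patterns {
			ok, err := filepath.Match(pat, p)
			if err != nil {
				fmt.Println("bad pattern:", pat, err)
				continue
			}
			// Refuse to exclude the whole context, as Matches does above.
			if ok && filepath.Clean(p) != "." {
				excluded = true
				break
			}
		}
		// "build/out.o" stays false: Match does not cross "/" boundaries.
		fmt.Printf("%-12s excluded=%v\n", p, excluded)
	}
}
```
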
diff --git a/utils/utils_test.go b/utils/utils_test.go
index 049c0e3..cf11182 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -2,7 +2,6 @@
 
 import (
 	"bytes"
-	"errors"
 	"io"
 	"io/ioutil"
 	"os"
@@ -35,217 +34,26 @@
 	}
 }
 
-type dummyWriter struct {
-	buffer      bytes.Buffer
-	failOnWrite bool
-}
-
-func (dw *dummyWriter) Write(p []byte) (n int, err error) {
-	if dw.failOnWrite {
-		return 0, errors.New("Fake fail")
-	}
-	return dw.buffer.Write(p)
-}
-
-func (dw *dummyWriter) String() string {
-	return dw.buffer.String()
-}
-
-func (dw *dummyWriter) Close() error {
-	return nil
-}
-
-func TestWriteBroadcaster(t *testing.T) {
-	writer := NewWriteBroadcaster()
-
-	// Test 1: Both bufferA and bufferB should contain "foo"
-	bufferA := &dummyWriter{}
-	writer.AddWriter(bufferA, "")
-	bufferB := &dummyWriter{}
-	writer.AddWriter(bufferB, "")
-	writer.Write([]byte("foo"))
-
-	if bufferA.String() != "foo" {
-		t.Errorf("Buffer contains %v", bufferA.String())
-	}
-
-	if bufferB.String() != "foo" {
-		t.Errorf("Buffer contains %v", bufferB.String())
-	}
-
-	// Test2: bufferA and bufferB should contain "foobar",
-	// while bufferC should only contain "bar"
-	bufferC := &dummyWriter{}
-	writer.AddWriter(bufferC, "")
-	writer.Write([]byte("bar"))
-
-	if bufferA.String() != "foobar" {
-		t.Errorf("Buffer contains %v", bufferA.String())
-	}
-
-	if bufferB.String() != "foobar" {
-		t.Errorf("Buffer contains %v", bufferB.String())
-	}
-
-	if bufferC.String() != "bar" {
-		t.Errorf("Buffer contains %v", bufferC.String())
-	}
-
-	// Test3: Test eviction on failure
-	bufferA.failOnWrite = true
-	writer.Write([]byte("fail"))
-	if bufferA.String() != "foobar" {
-		t.Errorf("Buffer contains %v", bufferA.String())
-	}
-	if bufferC.String() != "barfail" {
-		t.Errorf("Buffer contains %v", bufferC.String())
-	}
-	// Even though we reset the flag, no more writes should go in there
-	bufferA.failOnWrite = false
-	writer.Write([]byte("test"))
-	if bufferA.String() != "foobar" {
-		t.Errorf("Buffer contains %v", bufferA.String())
-	}
-	if bufferC.String() != "barfailtest" {
-		t.Errorf("Buffer contains %v", bufferC.String())
-	}
-
-	writer.CloseWriters()
-}
-
-type devNullCloser int
-
-func (d devNullCloser) Close() error {
-	return nil
-}
-
-func (d devNullCloser) Write(buf []byte) (int, error) {
-	return len(buf), nil
-}
-
-// This test checks for races. It is only useful when run with the race detector.
-func TestRaceWriteBroadcaster(t *testing.T) {
-	writer := NewWriteBroadcaster()
-	c := make(chan bool)
-	go func() {
-		writer.AddWriter(devNullCloser(0), "")
-		c <- true
-	}()
-	writer.Write([]byte("hello"))
-	<-c
-}
-
-func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
-	if r := CompareKernelVersion(a, b); r != result {
-		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
-	}
-}
-
-func TestCompareKernelVersion(t *testing.T) {
-	assertKernelVersion(t,
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		0)
-	assertKernelVersion(t,
-		&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		-1)
-	assertKernelVersion(t,
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		0)
-	assertKernelVersion(t,
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5},
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		&KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20},
-		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		-1)
-}
-
-func TestParseHost(t *testing.T) {
-	var (
-		defaultHttpHost = "127.0.0.1"
-		defaultUnix     = "/var/run/docker.sock"
-	)
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil {
-		t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil {
-		t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" {
-		t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" {
-		t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" {
-		t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" {
-		t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" {
-		t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" {
-		t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil {
-		t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr)
-	}
-	if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil {
-		t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr)
-	}
-}
-
-func TestParseRepositoryTag(t *testing.T) {
-	if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" {
-		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag)
-	}
-	if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" {
-		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag)
-	}
-	if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" {
-		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag)
-	}
-	if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" {
-		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag)
-	}
-	if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" {
-		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag)
-	}
-	if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" {
-		t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag)
-	}
-}
-
 func TestCheckLocalDns(t *testing.T) {
 	for resolv, result := range map[string]bool{`# Dynamic
 nameserver 10.0.2.3
-search dotcloud.net`: false,
+search docker.com`: false,
 		`# Dynamic
 #nameserver 127.0.0.1
 nameserver 10.0.2.3
-search dotcloud.net`: false,
+search docker.com`: false,
 		`# Dynamic
 nameserver 10.0.2.3 #not used 127.0.1.1
-search dotcloud.net`: false,
+search docker.com`: false,
 		`# Dynamic
 #nameserver 10.0.2.3
-#search dotcloud.net`: true,
+#search docker.com`: true,
 		`# Dynamic
 nameserver 127.0.0.1
-search dotcloud.net`: true,
+search docker.com`: true,
 		`# Dynamic
 nameserver 127.0.1.1
-search dotcloud.net`: true,
+search docker.com`: true,
 		`# Dynamic
 `: true,
 		``: true,
@@ -255,50 +63,6 @@
 		}
 	}
 }
-
-func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) {
-	var (
-		a *KernelVersionInfo
-	)
-	a, _ = ParseRelease(release)
-
-	if r := CompareKernelVersion(a, b); r != result {
-		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
-	}
-	if a.Flavor != b.Flavor {
-		t.Fatalf("Unexpected parsed kernel flavor.  Found %s, expected %s", a.Flavor, b.Flavor)
-	}
-}
-
-func TestParseRelease(t *testing.T) {
-	assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
-	assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
-	assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
-	assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
-	assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
-	assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
-}
-
-func TestParsePortMapping(t *testing.T) {
-	data, err := PartParser("ip:public:private", "192.168.1.1:80:8080")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(data) != 3 {
-		t.FailNow()
-	}
-	if data["ip"] != "192.168.1.1" {
-		t.Fail()
-	}
-	if data["public"] != "80" {
-		t.Fail()
-	}
-	if data["private"] != "8080" {
-		t.Fail()
-	}
-}
-
 func TestReplaceAndAppendEnvVars(t *testing.T) {
 	var (
 		d = []string{"HOME=/"}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
index 920a9b0..a27559d 100644
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
@@ -29,10 +29,11 @@
 // The Next method advances to the next file in the archive (including the first),
 // and then it can be treated as an io.Reader to access the file's data.
 type Reader struct {
-	r    io.Reader
-	err  error
-	pad  int64          // amount of padding (ignored) after current file entry
-	curr numBytesReader // reader for current file entry
+	r       io.Reader
+	err     error
+	pad     int64           // amount of padding (ignored) after current file entry
+	curr    numBytesReader  // reader for current file entry
+	hdrBuff [blockSize]byte // buffer to use in readHeader
 }
 
 // A numBytesReader is an io.Reader with a numBytes method, returning the number
@@ -426,7 +427,9 @@
 }
 
 func (tr *Reader) readHeader() *Header {
-	header := make([]byte, blockSize)
+	header := tr.hdrBuff[:]
+	copy(header, zeroBlock)
+
 	if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
 		return nil
 	}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
index 6eff6f6..dafb2ca 100644
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
@@ -37,8 +37,10 @@
 	nb         int64 // number of unwritten bytes for current file entry
 	pad        int64 // amount of padding to write after current file entry
 	closed     bool
-	usedBinary bool // whether the binary numeric field extension was used
-	preferPax  bool // use pax header instead of binary numeric header
+	usedBinary bool            // whether the binary numeric field extension was used
+	preferPax  bool            // use pax header instead of binary numeric header
+	hdrBuff    [blockSize]byte // buffer to use in writeHeader when writing a regular header
+	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
 }
 
 // NewWriter creates a new Writer writing to w.
@@ -160,7 +162,18 @@
 	// subsecond time resolution, but for now let's just capture
 	// too long fields or non ascii characters
 
-	header := make([]byte, blockSize)
+	var header []byte
+
+	// We need to select which scratch buffer to use carefully,
+	// since this method is called recursively to write PAX headers.
+	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
+	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
+	// already being used by the non-recursive call, so we must use paxHdrBuff.
+	header = tw.hdrBuff[:]
+	if !allowPax {
+		header = tw.paxHdrBuff[:]
+	}
+	copy(header, zeroBlock)
 	s := slicer(header)
 
 	// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
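
Both the reader and writer above now reuse a fixed `[blockSize]byte` scratch array instead of allocating a fresh header block per entry; because the buffer is reused it must be wiped with `copy(header, zeroBlock)` first, and the writer keeps a second `paxHdrBuff` since `writeHeader` recurses once when emitting a PAX header. A minimal sketch of the reuse-and-zero pattern (names abbreviated, not the actual tar code):

```go
package main

import "fmt"

const blockSize = 512

var zeroBlock = make([]byte, blockSize)

type writer struct {
	hdrBuff [blockSize]byte // reused for every header instead of a fresh allocation
}

// writeHeader shows the reuse-and-zero pattern: slice the scratch array,
// wipe whatever the previous header left behind, then fill it in.
func (w *writer) writeHeader(name string) []byte {
	header := w.hdrBuff[:]
	copy(header, zeroBlock) // without this, stale bytes would leak between entries
	copy(header, name)
	return header
}

func main() {
	w := &writer{}
	fmt.Printf("%q\n", w.writeHeader("first-entry")[:16])
	fmt.Printf("%q\n", w.writeHeader("2nd")[:16]) // no residue of "first-entry"
}
```
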
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
index 512fab1..5e42e32 100644
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
+++ b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
@@ -454,3 +454,38 @@
 		t.Fatal("Couldn't recover long name")
 	}
 }
+
+func TestValidTypeflagWithPAXHeader(t *testing.T) {
+	var buffer bytes.Buffer
+	tw := NewWriter(&buffer)
+
+	fileName := strings.Repeat("ab", 100)
+
+	hdr := &Header{
+		Name:     fileName,
+		Size:     4,
+		Typeflag: 0,
+	}
+	if err := tw.WriteHeader(hdr); err != nil {
+		t.Fatalf("Failed to write header: %s", err)
+	}
+	if _, err := tw.Write([]byte("fooo")); err != nil {
+		t.Fatalf("Failed to write the file's data: %s", err)
+	}
+	tw.Close()
+
+	tr := NewReader(&buffer)
+
+	for {
+		header, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatalf("Failed to read header: %s", err)
+		}
+		if header.Typeflag != 0 {
+			t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/.travis.yml b/vendor/src/github.com/docker/libcontainer/.travis.yml
index 94dc5ac..3ce0e27 100644
--- a/vendor/src/github.com/docker/libcontainer/.travis.yml
+++ b/vendor/src/github.com/docker/libcontainer/.travis.yml
@@ -1,12 +1,36 @@
 language: go
+go: 1.3
+
+# let us have pretty experimental Docker-based Travis workers
+sudo: false
+
+env:
+    - TRAVIS_GLOBAL_WTF=1
+    - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=1
+    - _GOOS=linux _GOARCH=amd64 CGO_ENABLED=0
+#    - _GOOS=linux _GOARCH=386 CGO_ENABLED=1 # TODO add this once Travis can handle it (https://github.com/travis-ci/travis-ci/issues/2207#issuecomment-49625061)
+    - _GOOS=linux _GOARCH=386 CGO_ENABLED=0
+    - _GOOS=linux _GOARCH=arm CGO_ENABLED=0
 
 install:
-    - go get -d ./...
-    - go get -d github.com/dotcloud/docker # just to be sure
-    - DOCKER_PATH="${GOPATH%%:*}/src/github.com/dotcloud/docker"
-    - sed -i 's!dotcloud/docker!docker/libcontainer!' "$DOCKER_PATH/hack/make/.validate"
+    - go get code.google.com/p/go.tools/cmd/cover
+    - mkdir -pv "${GOPATH%%:*}/src/github.com/docker" && [ -d "${GOPATH%%:*}/src/github.com/docker/libcontainer" ] || ln -sv "$(readlink -f .)" "${GOPATH%%:*}/src/github.com/docker/libcontainer"
+    - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then
+          gvm cross "$_GOOS" "$_GOARCH";
+          export GOOS="$_GOOS" GOARCH="$_GOARCH";
+      fi
+    - export GOPATH="$GOPATH:$(pwd)/vendor"
+    - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then go env; fi
+    - go get -d -v ./... # TODO remove this if /docker/docker gets purged from our includes
+    - if [ "$TRAVIS_GLOBAL_WTF" ]; then
+          export DOCKER_PATH="${GOPATH%%:*}/src/github.com/docker/docker";
+          mkdir -p "$DOCKER_PATH/hack/make";
+          ( cd "$DOCKER_PATH/hack/make" && wget -c 'https://raw.githubusercontent.com/docker/docker/master/hack/make/'{.validate,validate-dco,validate-gofmt} );
+          sed -i 's!docker/docker!docker/libcontainer!' "$DOCKER_PATH/hack/make/.validate";
+      fi
 
 script:
-    - bash "$DOCKER_PATH/hack/make/validate-dco"
-    - bash "$DOCKER_PATH/hack/make/validate-gofmt"
-    - go test
+    - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-dco"; fi
+    - if [ "$TRAVIS_GLOBAL_WTF" ]; then bash "$DOCKER_PATH/hack/make/validate-gofmt"; fi
+    - if [ -z "$TRAVIS_GLOBAL_WTF" ]; then make direct-build; fi
+    - if [ -z "$TRAVIS_GLOBAL_WTF" -a "$GOARCH" != 'arm' ]; then make direct-test-short; fi
diff --git a/vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md b/vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md
index f026896..07bf22a 100644
--- a/vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md
+++ b/vendor/src/github.com/docker/libcontainer/CONTRIBUTORS_GUIDE.md
@@ -176,7 +176,7 @@
 a ``prepare-commit-msg`` hook to your libcontainer checkout:
 
 ```
-curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
+curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/docker/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg
 ```
 
 * Note: the above script expects to find your GitHub user name in ``git config --get github.user``
diff --git a/vendor/src/github.com/docker/libcontainer/Dockerfile b/vendor/src/github.com/docker/libcontainer/Dockerfile
new file mode 100644
index 0000000..65bf573
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/Dockerfile
@@ -0,0 +1,23 @@
+FROM crosbymichael/golang
+
+RUN apt-get update && apt-get install -y gcc make
+RUN go get code.google.com/p/go.tools/cmd/cover
+
+# setup a playground for us to spawn containers in
+RUN mkdir /busybox && \
+    curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar' | tar -xC /busybox
+
+RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \
+    chmod +x /dind
+
+COPY . /go/src/github.com/docker/libcontainer
+WORKDIR /go/src/github.com/docker/libcontainer
+RUN cp sample_configs/minimal.json /busybox/container.json
+
+ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor
+
+RUN go get -d -v ./...
+RUN make direct-install
+
+ENTRYPOINT ["/dind"]
+CMD ["make", "direct-test"]
diff --git a/vendor/src/github.com/docker/libcontainer/MAINTAINERS b/vendor/src/github.com/docker/libcontainer/MAINTAINERS
index 8c36d09..24011b0 100644
--- a/vendor/src/github.com/docker/libcontainer/MAINTAINERS
+++ b/vendor/src/github.com/docker/libcontainer/MAINTAINERS
@@ -1,4 +1,6 @@
 Michael Crosby <michael@docker.com> (@crosbymichael)
 Rohit Jnagal <jnagal@google.com> (@rjnagal)
 Victor Marmol <vmarmol@google.com> (@vmarmol)
+Mrunal Patel <mpatel@redhat.com> (@mrunalp)
 .travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
+update-vendor.sh: Tianon Gravi <admwiggin@gmail.com> (@tianon)
diff --git a/vendor/src/github.com/docker/libcontainer/Makefile b/vendor/src/github.com/docker/libcontainer/Makefile
new file mode 100644
index 0000000..d6852b2
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/Makefile
@@ -0,0 +1,24 @@
+
+all:
+	docker build -t docker/libcontainer .
+
+test:
+	# we need NET_ADMIN for the netlink tests and SYS_ADMIN for mounting
+	docker run --rm -it --cap-add NET_ADMIN --cap-add SYS_ADMIN docker/libcontainer
+
+sh:
+	docker run --rm -it --cap-add NET_ADMIN --cap-add SYS_ADMIN -w /busybox docker/libcontainer nsinit exec sh
+
+GO_PACKAGES = $(shell find . -not \( -wholename ./vendor -prune \) -name '*.go' -print0 | xargs -0n1 dirname | sort -u)
+
+direct-test:
+	go test -cover -v $(GO_PACKAGES)
+
+direct-test-short:
+	go test -cover -test.short -v $(GO_PACKAGES)
+
+direct-build:
+	go build -v $(GO_PACKAGES)
+
+direct-install:
+	go install -v $(GO_PACKAGES)
diff --git a/vendor/src/github.com/docker/libcontainer/README.md b/vendor/src/github.com/docker/libcontainer/README.md
index ee14a57..b80d284 100644
--- a/vendor/src/github.com/docker/libcontainer/README.md
+++ b/vendor/src/github.com/docker/libcontainer/README.md
@@ -1,4 +1,4 @@
-## libcontainer - reference implementation for containers
+## libcontainer - reference implementation for containers [![Build Status](https://travis-ci.org/docker/libcontainer.png?branch=master)](https://travis-ci.org/docker/libcontainer)
 
 ### Note on API changes:
 
diff --git a/vendor/src/github.com/docker/libcontainer/api.go b/vendor/src/github.com/docker/libcontainer/api.go
deleted file mode 100644
index 310f06e..0000000
--- a/vendor/src/github.com/docker/libcontainer/api.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package libcontainer
-
-import (
-	"github.com/docker/libcontainer/cgroups/fs"
-	"github.com/docker/libcontainer/network"
-)
-
-// Returns all available stats for the given container.
-func GetStats(container *Config, state *State) (*ContainerStats, error) {
-	var containerStats ContainerStats
-	stats, err := fs.GetStats(container.Cgroups)
-	if err != nil {
-		return &containerStats, err
-	}
-	containerStats.CgroupStats = stats
-	networkStats, err := network.GetStats(&state.NetworkState)
-	if err != nil {
-		return &containerStats, err
-	}
-	containerStats.NetworkStats = networkStats
-
-	return &containerStats, nil
-}
diff --git a/vendor/src/github.com/docker/libcontainer/api_temp.go b/vendor/src/github.com/docker/libcontainer/api_temp.go
new file mode 100644
index 0000000..9b2c520
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/api_temp.go
@@ -0,0 +1,34 @@
+/*
+Temporary API endpoint for libcontainer while the full API is finalized (api.go).
+*/
+package libcontainer
+
+import (
+	"github.com/docker/libcontainer/cgroups/fs"
+	"github.com/docker/libcontainer/cgroups/systemd"
+	"github.com/docker/libcontainer/network"
+)
+
+// TODO(vmarmol): Complete Stats() in final libcontainer API and move users to that.
+// DEPRECATED: The below portions are only to be used during the transition to the official API.
+// Returns all available stats for the given container.
+func GetStats(container *Config, state *State) (*ContainerStats, error) {
+	var (
+		err   error
+		stats = &ContainerStats{}
+	)
+
+	if systemd.UseSystemd() {
+		stats.CgroupStats, err = systemd.GetStats(container.Cgroups)
+	} else {
+		stats.CgroupStats, err = fs.GetStats(container.Cgroups)
+	}
+
+	if err != nil {
+		return stats, err
+	}
+
+	stats.NetworkStats, err = network.GetStats(&state.NetworkState)
+
+	return stats, err
+}
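
A rough sketch of how a caller might drive this temporary endpoint; the `Config` and `State` values would normally be decoded from the container's saved JSON, and the zero values below only illustrate the call shape:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/libcontainer"
)

func main() {
	// A real caller decodes these from the container's saved state; the
	// zero values here only illustrate the call shape and would make the
	// cgroup lookup fail at runtime.
	var (
		config libcontainer.Config
		state  libcontainer.State
	)

	// GetStats picks the systemd cgroup backend when systemd manages
	// cgroups and falls back to the raw filesystem driver otherwise.
	stats, err := libcontainer.GetStats(&config, &state)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats.CgroupStats, stats.NetworkStats)
}
```
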
diff --git a/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go b/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
index 704ee29..fb1574d 100644
--- a/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
+++ b/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
@@ -1,4 +1,4 @@
-// +build apparmor,linux,amd64
+// +build apparmor,linux
 
 package apparmor
 
diff --git a/vendor/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go b/vendor/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go
index 8d86ce9..937bf91 100644
--- a/vendor/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go
+++ b/vendor/src/github.com/docker/libcontainer/apparmor/apparmor_disabled.go
@@ -1,4 +1,4 @@
-// +build !apparmor !linux !amd64
+// +build !apparmor !linux
 
 package apparmor
 
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go b/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go
index 64ece56..567e9a6 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/cgroups.go
@@ -1,15 +1,11 @@
 package cgroups
 
 import (
-	"errors"
+	"fmt"
 
 	"github.com/docker/libcontainer/devices"
 )
 
-var (
-	ErrNotFound = errors.New("mountpoint not found")
-)
-
 type FreezerState string
 
 const (
@@ -18,6 +14,29 @@
 	Thawed    FreezerState = "THAWED"
 )
 
+type NotFoundError struct {
+	Subsystem string
+}
+
+func (e *NotFoundError) Error() string {
+	return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
+}
+
+func NewNotFoundError(sub string) error {
+	return &NotFoundError{
+		Subsystem: sub,
+	}
+}
+
+func IsNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	_, ok := err.(*NotFoundError)
+	return ok
+}
+
 type Cgroup struct {
 	Name   string `json:"name,omitempty"`
 	Parent string `json:"parent,omitempty"` // name of parent cgroup or slice
@@ -37,4 +56,5 @@
 
 type ActiveCgroup interface {
 	Cleanup() error
+	Paths() (map[string]string, error)
 }
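
Replacing the single `ErrNotFound` sentinel with a typed `NotFoundError` lets callers both detect the condition and learn which subsystem was missing. A self-contained sketch of the pattern; the `lookup` helper is hypothetical:

```go
package main

import (
	"errors"
	"fmt"
)

// NotFoundError mirrors the typed error above: callers learn which cgroup
// subsystem had no mountpoint, and detection is a type assertion rather
// than a comparison against one sentinel value.
type NotFoundError struct {
	Subsystem string
}

func (e *NotFoundError) Error() string {
	return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
}

func IsNotFound(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(*NotFoundError)
	return ok
}

// lookup is a hypothetical helper that always fails, for demonstration.
func lookup(subsystem string) error {
	return &NotFoundError{Subsystem: subsystem}
}

func main() {
	err := lookup("memory")
	fmt.Println(err, "->", IsNotFound(err))                    // true
	fmt.Println("plain error ->", IsNotFound(errors.New("x"))) // false
}
```
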
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go b/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go
new file mode 100644
index 0000000..d1a6611
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/cgutil.go
@@ -0,0 +1,264 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/codegangsta/cli"
+	"github.com/docker/libcontainer/cgroups"
+	"github.com/docker/libcontainer/cgroups/fs"
+	"github.com/docker/libcontainer/cgroups/systemd"
+)
+
+var createCommand = cli.Command{
+	Name:  "create",
+	Usage: "Create a cgroup container using the supplied configuration and initial process.",
+	Flags: []cli.Flag{
+		cli.StringFlag{Name: "config, c", Value: "cgroup.json", Usage: "path to container configuration (cgroups.Cgroup object)"},
+		cli.IntFlag{Name: "pid, p", Value: 0, Usage: "pid of the initial process in the container"},
+	},
+	Action: createAction,
+}
+
+var destroyCommand = cli.Command{
+	Name:  "destroy",
+	Usage: "Destroy an existing cgroup container.",
+	Flags: []cli.Flag{
+		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
+		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
+	},
+	Action: destroyAction,
+}
+
+var statsCommand = cli.Command{
+	Name:  "stats",
+	Usage: "Get stats for cgroup",
+	Flags: []cli.Flag{
+		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
+		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
+	},
+	Action: statsAction,
+}
+
+var pauseCommand = cli.Command{
+	Name:  "pause",
+	Usage: "Pause cgroup",
+	Flags: []cli.Flag{
+		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
+		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
+	},
+	Action: pauseAction,
+}
+
+var resumeCommand = cli.Command{
+	Name:  "resume",
+	Usage: "Resume a paused cgroup",
+	Flags: []cli.Flag{
+		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
+		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
+	},
+	Action: resumeAction,
+}
+
+var psCommand = cli.Command{
+	Name:  "ps",
+	Usage: "Get list of pids for a cgroup",
+	Flags: []cli.Flag{
+		cli.StringFlag{Name: "name, n", Value: "", Usage: "container name"},
+		cli.StringFlag{Name: "parent, p", Value: "", Usage: "container parent"},
+	},
+	Action: psAction,
+}
+
+func getConfigFromFile(c *cli.Context) (*cgroups.Cgroup, error) {
+	f, err := os.Open(c.String("config"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var config *cgroups.Cgroup
+	if err := json.NewDecoder(f).Decode(&config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+func openLog(name string) error {
+	f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755)
+	if err != nil {
+		return err
+	}
+
+	log.SetOutput(f)
+	return nil
+}
+
+func getConfig(context *cli.Context) (*cgroups.Cgroup, error) {
+	name := context.String("name")
+	if name == "" {
+		log.Fatal(fmt.Errorf("Missing container name"))
+	}
+	parent := context.String("parent")
+	return &cgroups.Cgroup{
+		Name:   name,
+		Parent: parent,
+	}, nil
+}
+
+func killAll(config *cgroups.Cgroup) {
+	// We could use the freezer here to prevent processes from spawning while we
+	// are trying to kill everything, but we go with the more portable retry
+	// approach for now.
+	pids := getPids(config)
+	retry := 10
+	for len(pids) != 0 && retry > 0 {
+		killPids(pids)
+		time.Sleep(100 * time.Millisecond)
+		retry--
+		pids = getPids(config)
+	}
+	if len(pids) != 0 {
+		log.Fatal(fmt.Errorf("Could not kill existing processes in the container."))
+	}
+}
+
+func getPids(config *cgroups.Cgroup) []int {
+	pids, err := fs.GetPids(config)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return pids
+}
+
+func killPids(pids []int) {
+	for _, pid := range pids {
+		// pids might go away on their own. Ignore errors.
+		syscall.Kill(pid, syscall.SIGKILL)
+	}
+}
+
+func setFreezerState(context *cli.Context, state cgroups.FreezerState) {
+	config, err := getConfig(context)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if systemd.UseSystemd() {
+		err = systemd.Freeze(config, state)
+	} else {
+		err = fs.Freeze(config, state)
+	}
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func createAction(context *cli.Context) {
+	config, err := getConfigFromFile(context)
+	if err != nil {
+		log.Fatal(err)
+	}
+	pid := context.Int("pid")
+	if pid <= 0 {
+		log.Fatal(fmt.Errorf("Invalid pid : %d", pid))
+	}
+	if systemd.UseSystemd() {
+		_, err := systemd.Apply(config, pid)
+		if err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		_, err := fs.Apply(config, pid)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func destroyAction(context *cli.Context) {
+	config, err := getConfig(context)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	killAll(config)
+	// Systemd will clean up the cgroup state for an empty container.
+	if !systemd.UseSystemd() {
+		err := fs.Cleanup(config)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func statsAction(context *cli.Context) {
+	config, err := getConfig(context)
+	if err != nil {
+		log.Fatal(err)
+	}
+	stats, err := fs.GetStats(config)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	out, err := json.MarshalIndent(stats, "", "\t")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("Usage stats for '%s':\n %v\n", config.Name, string(out))
+}
+
+func pauseAction(context *cli.Context) {
+	setFreezerState(context, cgroups.Frozen)
+}
+
+func resumeAction(context *cli.Context) {
+	setFreezerState(context, cgroups.Thawed)
+}
+
+func psAction(context *cli.Context) {
+	config, err := getConfig(context)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	pids, err := fs.GetPids(config)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Printf("Pids in '%s':\n", config.Name)
+	fmt.Println(pids)
+}
+
+func main() {
+	logPath := os.Getenv("log")
+	if logPath != "" {
+		if err := openLog(logPath); err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	app := cli.NewApp()
+	app.Name = "cgutil"
+	app.Usage = "Test utility for libcontainer cgroups package"
+	app.Version = "0.1"
+
+	app.Commands = []cli.Command{
+		createCommand,
+		destroyCommand,
+		statsCommand,
+		pauseCommand,
+		resumeCommand,
+		psCommand,
+	}
+
+	if err := app.Run(os.Args); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json b/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json
new file mode 100644
index 0000000..2d29784
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/cgutil/sample_cgroup.json
@@ -0,0 +1,10 @@
+{
+	"name": "luke",
+	"parent": "darth",
+	"allow_all_devices": true,
+	"memory": 1073741824,
+	"memory_swap": -1,
+	"cpu_shares": 2048,
+	"cpu_quota": 500000,
+	"cpu_period": 250000
+}
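These keys map onto cgroups.Cgroup through its json tags, so the file can be fed straight to cgutil create (for example, cgutil create --config sample_cgroup.json --pid 1234 — flag names per the command definitions above). A hedged decoding sketch, assuming the memory and cpu_shares tags match the field names used elsewhere in the package:

	f, err := os.Open("sample_cgroup.json") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var config cgroups.Cgroup
	if err := json.NewDecoder(f).Decode(&config); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s/%s: memory=%d cpu_shares=%d\n", config.Parent, config.Name, config.Memory, config.CpuShares)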
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
index 8fa34c2..443dbb6 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
@@ -12,21 +12,25 @@
 
 var (
 	subsystems = map[string]subsystem{
-		"devices":    &devicesGroup{},
-		"memory":     &memoryGroup{},
-		"cpu":        &cpuGroup{},
-		"cpuset":     &cpusetGroup{},
-		"cpuacct":    &cpuacctGroup{},
-		"blkio":      &blkioGroup{},
-		"perf_event": &perfEventGroup{},
-		"freezer":    &freezerGroup{},
+		"devices":    &DevicesGroup{},
+		"memory":     &MemoryGroup{},
+		"cpu":        &CpuGroup{},
+		"cpuset":     &CpusetGroup{},
+		"cpuacct":    &CpuacctGroup{},
+		"blkio":      &BlkioGroup{},
+		"perf_event": &PerfEventGroup{},
+		"freezer":    &FreezerGroup{},
 	}
+	CgroupProcesses = "cgroup.procs"
 )
 
 type subsystem interface {
-	Set(*data) error
+	// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
+	GetStats(path string, stats *cgroups.Stats) error
+	// Removes the cgroup represented by 'data'.
 	Remove(*data) error
-	GetStats(*data, *cgroups.Stats) error
+	// Creates and joins the cgroup represented by data.
+	Set(*data) error
 }
 
 type data struct {
@@ -52,6 +56,14 @@
 	return d, nil
 }
 
+func Cleanup(c *cgroups.Cgroup) error {
+	d, err := getCgroupData(c, 0)
+	if err != nil {
+		return fmt.Errorf("Could not get Cgroup data %s", err)
+	}
+	return d.Cleanup()
+}
+
 func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
 	stats := cgroups.NewStats()
 
@@ -60,10 +72,19 @@
 		return nil, fmt.Errorf("getting CgroupData %s", err)
 	}
 
-	for sysName, sys := range subsystems {
-		// Don't fail if a cgroup hierarchy was not found.
-		if err := sys.GetStats(d, stats); err != nil && err != cgroups.ErrNotFound {
-			return nil, fmt.Errorf("getting stats for system %q %s", sysName, err)
+	for sysname, sys := range subsystems {
+		path, err := d.path(sysname)
+		if err != nil {
+			// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+			if cgroups.IsNotFound(err) {
+				continue
+			}
+
+			return nil, err
+		}
+
+		if err := sys.GetStats(path, stats); err != nil {
+			return nil, err
 		}
 	}
 
@@ -132,11 +153,47 @@
 	return filepath.Join(raw.root, subsystem, initPath), nil
 }
 
+func (raw *data) Paths() (map[string]string, error) {
+	paths := make(map[string]string)
+
+	for sysname := range subsystems {
+		path, err := raw.path(sysname)
+		if err != nil {
+			// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+			if cgroups.IsNotFound(err) {
+				continue
+			}
+
+			return nil, err
+		}
+
+		paths[sysname] = path
+	}
+
+	return paths, nil
+}
+
 func (raw *data) path(subsystem string) (string, error) {
+	// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
+	if filepath.IsAbs(raw.cgroup) {
+		path := filepath.Join(raw.root, subsystem, raw.cgroup)
+
+		if _, err := os.Stat(path); err != nil {
+			if os.IsNotExist(err) {
+				return "", cgroups.NewNotFoundError(subsystem)
+			}
+
+			return "", err
+		}
+
+		return path, nil
+	}
+
 	parent, err := raw.parent(subsystem)
 	if err != nil {
 		return "", err
 	}
+
 	return filepath.Join(parent, raw.cgroup), nil
 }
 
@@ -148,7 +205,7 @@
 	if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
 		return "", err
 	}
-	if err := writeFile(path, "cgroup.procs", strconv.Itoa(raw.pid)); err != nil {
+	if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil {
 		return "", err
 	}
 	return path, nil
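Taken together, the refactored fs backend now follows a create/inspect/destroy shape; a hedged sketch, assuming fs.Apply returns a cgroups.ActiveCgroup as the systemd variant does:

	active, err := fs.Apply(config, pid) // config is a *cgroups.Cgroup
	if err != nil {
		log.Fatal(err)
	}
	defer active.Cleanup()

	paths, err := active.Paths() // per-subsystem directories; absent hierarchies are skipped
	if err != nil {
		log.Fatal(err)
	}
	for sys, p := range paths {
		fmt.Printf("%s -> %s\n", sys, p)
	}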
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
index 0e0a198..f784d01 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
@@ -11,18 +11,19 @@
 	"github.com/docker/libcontainer/cgroups"
 )
 
-type blkioGroup struct {
+type BlkioGroup struct {
 }
 
-func (s *blkioGroup) Set(d *data) error {
+func (s *BlkioGroup) Set(d *data) error {
 	// we just want to join this group even though we don't set anything
-	if _, err := d.join("blkio"); err != nil && err != cgroups.ErrNotFound {
+	if _, err := d.join("blkio"); err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
+
 	return nil
 }
 
-func (s *blkioGroup) Remove(d *data) error {
+func (s *BlkioGroup) Remove(d *data) error {
 	return removePath(d.path("blkio"))
 }
 
@@ -65,6 +66,9 @@
 	var blkioStats []cgroups.BlkioStatEntry
 	f, err := os.Open(path)
 	if err != nil {
+		if os.IsNotExist(err) {
+			return blkioStats, nil
+		}
 		return nil, err
 	}
 	defer f.Close()
@@ -110,13 +114,9 @@
 	return blkioStats, nil
 }
 
-func (s *blkioGroup) GetStats(d *data, stats *cgroups.Stats) error {
+func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error {
 	var blkioStats []cgroups.BlkioStatEntry
 	var err error
-	path, err := d.path("blkio")
-	if err != nil {
-		return err
-	}
 
 	if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil {
 		return err
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
index c916c86..db6f8d5 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
@@ -44,8 +44,8 @@
 		"blkio.sectors_recursive":          sectorsRecursiveContents,
 	})
 
-	blkio := &blkioGroup{}
-	err := blkio.GetStats(helper.CgroupData, &actualStats)
+	blkio := &BlkioGroup{}
+	err := blkio.GetStats(helper.CgroupPath, &actualStats)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -84,10 +84,10 @@
 		"blkio.io_queued_recursive":        queuedRecursiveContents,
 	})
 
-	blkio := &blkioGroup{}
-	err := blkio.GetStats(helper.CgroupData, &actualStats)
-	if err == nil {
-		t.Fatal("Expected to fail, but did not")
+	blkio := &BlkioGroup{}
+	err := blkio.GetStats(helper.CgroupPath, &actualStats)
+	if err != nil {
+		t.Fatalf("Failed unexpectedly: %s", err)
 	}
 }
 
@@ -100,10 +100,10 @@
 		"blkio.sectors_recursive":     sectorsRecursiveContents,
 	})
 
-	blkio := &blkioGroup{}
-	err := blkio.GetStats(helper.CgroupData, &actualStats)
-	if err == nil {
-		t.Fatal("Expected to fail, but did not")
+	blkio := &BlkioGroup{}
+	err := blkio.GetStats(helper.CgroupPath, &actualStats)
+	if err != nil {
+		t.Fatalf("Failed unexpectedly: %s", err)
 	}
 }
 
@@ -116,10 +116,10 @@
 		"blkio.sectors_recursive":          sectorsRecursiveContents,
 	})
 
-	blkio := &blkioGroup{}
-	err := blkio.GetStats(helper.CgroupData, &actualStats)
-	if err == nil {
-		t.Fatal("Expected to fail, but did not")
+	blkio := &BlkioGroup{}
+	err := blkio.GetStats(helper.CgroupPath, &actualStats)
+	if err != nil {
+		t.Fatalf("Failed unexpectedly: %s", err)
 	}
 }
 
@@ -132,10 +132,10 @@
 		"blkio.sectors_recursive":          sectorsRecursiveContents,
 	})
 
-	blkio := &blkioGroup{}
-	err := blkio.GetStats(helper.CgroupData, &actualStats)
-	if err == nil {
-		t.Fatal("Expected to fail, but did not")
+	blkio := &BlkioGroup{}
+	err := blkio.GetStats(helper.CgroupPath, &actualStats)
+	if err != nil {
+		t.Fatalf("Failed unexpectedly: %s", err)
 	}
 }
 
@@ -149,8 +149,8 @@
 		"blkio.sectors_recursive":          sectorsRecursiveContents,
 	})
 
-	blkio := &blkioGroup{}
-	err := blkio.GetStats(helper.CgroupData, &actualStats)
+	blkio := &BlkioGroup{}
+	err := blkio.GetStats(helper.CgroupPath, &actualStats)
 	if err == nil {
 		t.Fatal("Expected to fail, but did not")
 	}
@@ -166,8 +166,8 @@
 		"blkio.sectors_recursive":          sectorsRecursiveContents,
 	})
 
-	blkio := &blkioGroup{}
-	err := blkio.GetStats(helper.CgroupData, &actualStats)
+	blkio := &BlkioGroup{}
+	err := blkio.GetStats(helper.CgroupPath, &actualStats)
 	if err == nil {
 		t.Fatal("Expected to fail, but did not")
 	}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
index 1c692fd..efac9ed 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
@@ -5,15 +5,14 @@
 	"os"
 	"path/filepath"
 	"strconv"
-	"syscall"
 
 	"github.com/docker/libcontainer/cgroups"
 )
 
-type cpuGroup struct {
+type CpuGroup struct {
 }
 
-func (s *cpuGroup) Set(d *data) error {
+func (s *CpuGroup) Set(d *data) error {
 	// We always want to join the cpu group, to allow fair cpu scheduling
 	// on a container basis
 	dir, err := d.join("cpu")
@@ -38,19 +37,14 @@
 	return nil
 }
 
-func (s *cpuGroup) Remove(d *data) error {
+func (s *CpuGroup) Remove(d *data) error {
 	return removePath(d.path("cpu"))
 }
 
-func (s *cpuGroup) GetStats(d *data, stats *cgroups.Stats) error {
-	path, err := d.path("cpu")
-	if err != nil {
-		return err
-	}
-
+func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error {
 	f, err := os.Open(filepath.Join(path, "cpu.stat"))
 	if err != nil {
-		if pathErr, ok := err.(*os.PathError); ok && pathErr.Err == syscall.ENOENT {
+		if os.IsNotExist(err) {
 			return nil
 		}
 		return err
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go
index ebdb6a5..017a1f4 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu_test.go
@@ -23,8 +23,8 @@
 		"cpu.stat": cpuStatContent,
 	})
 
-	cpu := &cpuGroup{}
-	err := cpu.GetStats(helper.CgroupData, &actualStats)
+	cpu := &CpuGroup{}
+	err := cpu.GetStats(helper.CgroupPath, &actualStats)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -41,8 +41,8 @@
 	helper := NewCgroupTestUtil("cpu", t)
 	defer helper.cleanup()
 
-	cpu := &cpuGroup{}
-	err := cpu.GetStats(helper.CgroupData, &actualStats)
+	cpu := &CpuGroup{}
+	err := cpu.GetStats(helper.CgroupPath, &actualStats)
 	if err != nil {
 		t.Fatal("Expected not to fail, but did")
 	}
@@ -58,8 +58,8 @@
 		"cpu.stat": cpuStatContent,
 	})
 
-	cpu := &cpuGroup{}
-	err := cpu.GetStats(helper.CgroupData, &actualStats)
+	cpu := &CpuGroup{}
+	err := cpu.GetStats(helper.CgroupPath, &actualStats)
 	if err == nil {
 		t.Fatal("Expected failed stat parsing.")
 	}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go
index a3d22c9..853ab6b 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuacct.go
@@ -12,7 +12,7 @@
 	"time"
 
 	"github.com/docker/libcontainer/cgroups"
-	"github.com/dotcloud/docker/pkg/system"
+	"github.com/docker/libcontainer/system"
 )
 
 var (
@@ -22,31 +22,32 @@
 
 const nanosecondsInSecond = 1000000000
 
-type cpuacctGroup struct {
+type CpuacctGroup struct {
 }
 
-func (s *cpuacctGroup) Set(d *data) error {
+func (s *CpuacctGroup) Set(d *data) error {
 	// we just want to join this group even though we don't set anything
-	if _, err := d.join("cpuacct"); err != nil && err != cgroups.ErrNotFound {
+	if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
+
 	return nil
 }
 
-func (s *cpuacctGroup) Remove(d *data) error {
+func (s *CpuacctGroup) Remove(d *data) error {
 	return removePath(d.path("cpuacct"))
 }
 
-func (s *cpuacctGroup) GetStats(d *data, stats *cgroups.Stats) error {
+func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error {
 	var (
+		err                                                                                                           error
 		startCpu, lastCpu, startSystem, lastSystem, startUsage, lastUsage, kernelModeUsage, userModeUsage, percentage uint64
 	)
-	path, err := d.path("cpuacct")
-	if kernelModeUsage, userModeUsage, err = s.getCpuUsage(d, path); err != nil {
+	if kernelModeUsage, userModeUsage, err = getCpuUsage(path); err != nil {
 		return err
 	}
 	startCpu = kernelModeUsage + userModeUsage
-	if startSystem, err = s.getSystemCpuUsage(d); err != nil {
+	if startSystem, err = getSystemCpuUsage(); err != nil {
 		return err
 	}
 	startUsageTime := time.Now()
@@ -54,12 +55,12 @@
 		return err
 	}
-	// sample for 100ms
+	// sample for 1 second
-	time.Sleep(100 * time.Millisecond)
-	if kernelModeUsage, userModeUsage, err = s.getCpuUsage(d, path); err != nil {
+	time.Sleep(1000 * time.Millisecond)
+	if kernelModeUsage, userModeUsage, err = getCpuUsage(path); err != nil {
 		return err
 	}
 	lastCpu = kernelModeUsage + userModeUsage
-	if lastSystem, err = s.getSystemCpuUsage(d); err != nil {
+	if lastSystem, err = getSystemCpuUsage(); err != nil {
 		return err
 	}
 	usageSampleDuration := time.Since(startUsageTime)
@@ -73,14 +74,14 @@
 		deltaUsage  = lastUsage - startUsage
 	)
 	if deltaSystem > 0.0 {
-		percentage = ((deltaProc / deltaSystem) * clockTicks) * cpuCount
+		percentage = uint64((float64(deltaProc) / float64(deltaSystem)) * float64(clockTicks*cpuCount))
 	}
 	// NOTE: a percentage over 100% is valid for POSIX because it means the
 	// process is using multiple cores
 	stats.CpuStats.CpuUsage.PercentUsage = percentage
 	// Delta usage is in nanoseconds of CPU time so get the usage (in cores) over the sample time.
 	stats.CpuStats.CpuUsage.CurrentUsage = deltaUsage / uint64(usageSampleDuration.Nanoseconds())
-	percpuUsage, err := s.getPercpuUsage(path)
+	percpuUsage, err := getPercpuUsage(path)
 	if err != nil {
 		return err
 	}
@@ -92,7 +93,7 @@
 }
 
 // TODO(vmarmol): Use cgroups stats.
-func (s *cpuacctGroup) getSystemCpuUsage(d *data) (uint64, error) {
+func getSystemCpuUsage() (uint64, error) {
 
 	f, err := os.Open("/proc/stat")
 	if err != nil {
@@ -125,7 +126,7 @@
 	return 0, fmt.Errorf("invalid stat format")
 }
 
-func (s *cpuacctGroup) getCpuUsage(d *data, path string) (uint64, uint64, error) {
+func getCpuUsage(path string) (uint64, uint64, error) {
 	kernelModeUsage := uint64(0)
 	userModeUsage := uint64(0)
 	data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.stat"))
@@ -146,7 +147,7 @@
 	return kernelModeUsage, userModeUsage, nil
 }
 
-func (s *cpuacctGroup) getPercpuUsage(path string) ([]uint64, error) {
+func getPercpuUsage(path string) ([]uint64, error) {
 	percpuUsage := []uint64{}
 	data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu"))
 	if err != nil {
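To make the PercentUsage formula above concrete with hypothetical numbers: if over the sample window deltaProc is 50 jiffies of container CPU time and deltaSystem is 400 jiffies of total system time, with clockTicks at the usual Linux USER_HZ of 100 and cpuCount of 4, then uint64((50.0/400.0) * float64(100*4)) = 50 — and, as the NOTE in the code says, values above 100 are legitimate on multi-core machines.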
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
index 094e8b3..8847739 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
@@ -10,41 +10,50 @@
 	"github.com/docker/libcontainer/cgroups"
 )
 
-type cpusetGroup struct {
+type CpusetGroup struct {
 }
 
-func (s *cpusetGroup) Set(d *data) error {
+func (s *CpusetGroup) Set(d *data) error {
 	// we don't want to join this cgroup unless it is specified
 	if d.c.CpusetCpus != "" {
 		dir, err := d.path("cpuset")
 		if err != nil {
 			return err
 		}
-		if err := s.ensureParent(dir); err != nil {
-			return err
-		}
 
-		// because we are not using d.join we need to place the pid into the procs file
-		// unlike the other subsystems
-		if err := writeFile(dir, "cgroup.procs", strconv.Itoa(d.pid)); err != nil {
-			return err
-		}
-		if err := writeFile(dir, "cpuset.cpus", d.c.CpusetCpus); err != nil {
-			return err
-		}
+		return s.SetDir(dir, d.c.CpusetCpus, d.pid)
 	}
+
 	return nil
 }
 
-func (s *cpusetGroup) Remove(d *data) error {
+func (s *CpusetGroup) Remove(d *data) error {
 	return removePath(d.path("cpuset"))
 }
 
-func (s *cpusetGroup) GetStats(d *data, stats *cgroups.Stats) error {
+func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
 	return nil
 }
 
-func (s *cpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
+func (s *CpusetGroup) SetDir(dir, value string, pid int) error {
+	if err := s.ensureParent(dir); err != nil {
+		return err
+	}
+
+	// because we are not using d.join we need to place the pid into the procs file
+	// unlike the other subsystems
+	if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
+		return err
+	}
+
+	if err := writeFile(dir, "cpuset.cpus", value); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) {
 	if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil {
 		return
 	}
@@ -57,7 +66,7 @@
 // ensureParent ensures that the parent directory of current is created
 // with the proper cpus and mems files copied from its parent if the values
 // are a file with a new line char
-func (s *cpusetGroup) ensureParent(current string) error {
+func (s *CpusetGroup) ensureParent(current string) error {
 	parent := filepath.Dir(current)
 
 	if _, err := os.Stat(parent); err != nil {
@@ -78,7 +87,7 @@
 
 // copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
 // directory to the current directory if the file's contents are 0
-func (s *cpusetGroup) copyIfNeeded(current, parent string) error {
+func (s *CpusetGroup) copyIfNeeded(current, parent string) error {
 	var (
 		err                      error
 		currentCpus, currentMems []byte
@@ -105,6 +114,6 @@
 	return nil
 }
 
-func (s *cpusetGroup) isEmpty(b []byte) bool {
+func (s *CpusetGroup) isEmpty(b []byte) bool {
 	return len(bytes.Trim(b, "\n")) == 0
 }
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
index 675cef3..98d5d2d 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
@@ -2,10 +2,10 @@
 
 import "github.com/docker/libcontainer/cgroups"
 
-type devicesGroup struct {
+type DevicesGroup struct {
 }
 
-func (s *devicesGroup) Set(d *data) error {
+func (s *DevicesGroup) Set(d *data) error {
 	dir, err := d.join("devices")
 	if err != nil {
 		return err
@@ -25,10 +25,10 @@
 	return nil
 }
 
-func (s *devicesGroup) Remove(d *data) error {
+func (s *DevicesGroup) Remove(d *data) error {
 	return removePath(d.path("devices"))
 }
 
-func (s *devicesGroup) GetStats(d *data, stats *cgroups.Stats) error {
+func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error {
 	return nil
 }
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/freezer.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/freezer.go
index f6a1044..c6b677f 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/freezer.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/freezer.go
@@ -1,18 +1,16 @@
 package fs
 
 import (
-	"io/ioutil"
-	"path/filepath"
 	"strings"
 	"time"
 
 	"github.com/docker/libcontainer/cgroups"
 )
 
-type freezerGroup struct {
+type FreezerGroup struct {
 }
 
-func (s *freezerGroup) Set(d *data) error {
+func (s *FreezerGroup) Set(d *data) error {
 	switch d.c.Freezer {
 	case cgroups.Frozen, cgroups.Thawed:
 		dir, err := d.path("freezer")
@@ -35,7 +33,7 @@
 			time.Sleep(1 * time.Millisecond)
 		}
 	default:
-		if _, err := d.join("freezer"); err != nil && err != cgroups.ErrNotFound {
+		if _, err := d.join("freezer"); err != nil && !cgroups.IsNotFound(err) {
 			return err
 		}
 	}
@@ -43,29 +41,10 @@
 	return nil
 }
 
-func (s *freezerGroup) Remove(d *data) error {
+func (s *FreezerGroup) Remove(d *data) error {
 	return removePath(d.path("freezer"))
 }
 
-func getFreezerFileData(path string) (string, error) {
-	data, err := ioutil.ReadFile(path)
-	return strings.TrimSuffix(string(data), "\n"), err
-}
-
-func (s *freezerGroup) GetStats(d *data, stats *cgroups.Stats) error {
-	path, err := d.path("freezer")
-	if err != nil {
-		return err
-	}
-	var data string
-	if data, err = getFreezerFileData(filepath.Join(path, "freezer.parent_freezing")); err != nil {
-		return err
-	}
-	stats.FreezerStats.ParentState = data
-	if data, err = getFreezerFileData(filepath.Join(path, "freezer.self_freezing")); err != nil {
-		return err
-	}
-	stats.FreezerStats.SelfState = data
-
+func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error {
 	return nil
 }
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
index b4453f4..ea92934 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
@@ -9,12 +9,12 @@
 	"github.com/docker/libcontainer/cgroups"
 )
 
-type memoryGroup struct {
+type MemoryGroup struct {
 }
 
-func (s *memoryGroup) Set(d *data) error {
+func (s *MemoryGroup) Set(d *data) error {
 	dir, err := d.join("memory")
-	// only return an error for memory if it was not specified
+	// only return an error for memory if it was specified
 	if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) {
 		return err
 	}
@@ -47,19 +47,17 @@
 	return nil
 }
 
-func (s *memoryGroup) Remove(d *data) error {
+func (s *MemoryGroup) Remove(d *data) error {
 	return removePath(d.path("memory"))
 }
 
-func (s *memoryGroup) GetStats(d *data, stats *cgroups.Stats) error {
-	path, err := d.path("memory")
-	if err != nil {
-		return err
-	}
-
+func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
 	// Set stats from memory.stat.
 	statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
 	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
 		return err
 	}
 	defer statsFile.Close()
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
index 8307482..e92f1da 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
@@ -24,8 +24,8 @@
 		"memory.failcnt":            memoryFailcnt,
 	})
 
-	memory := &memoryGroup{}
-	err := memory.GetStats(helper.CgroupData, &actualStats)
+	memory := &MemoryGroup{}
+	err := memory.GetStats(helper.CgroupPath, &actualStats)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -41,10 +41,10 @@
 		"memory.max_usage_in_bytes": memoryMaxUsageContents,
 	})
 
-	memory := &memoryGroup{}
-	err := memory.GetStats(helper.CgroupData, &actualStats)
-	if err == nil {
-		t.Fatal("Expected failure")
+	memory := &MemoryGroup{}
+	err := memory.GetStats(helper.CgroupPath, &actualStats)
+	if err != nil {
+		t.Fatal(err)
 	}
 }
 
@@ -56,8 +56,8 @@
 		"memory.max_usage_in_bytes": memoryMaxUsageContents,
 	})
 
-	memory := &memoryGroup{}
-	err := memory.GetStats(helper.CgroupData, &actualStats)
+	memory := &MemoryGroup{}
+	err := memory.GetStats(helper.CgroupPath, &actualStats)
 	if err == nil {
 		t.Fatal("Expected failure")
 	}
@@ -71,8 +71,8 @@
 		"memory.usage_in_bytes": memoryUsageContents,
 	})
 
-	memory := &memoryGroup{}
-	err := memory.GetStats(helper.CgroupData, &actualStats)
+	memory := &MemoryGroup{}
+	err := memory.GetStats(helper.CgroupPath, &actualStats)
 	if err == nil {
 		t.Fatal("Expected failure")
 	}
@@ -87,8 +87,8 @@
 		"memory.max_usage_in_bytes": memoryMaxUsageContents,
 	})
 
-	memory := &memoryGroup{}
-	err := memory.GetStats(helper.CgroupData, &actualStats)
+	memory := &MemoryGroup{}
+	err := memory.GetStats(helper.CgroupPath, &actualStats)
 	if err == nil {
 		t.Fatal("Expected failure")
 	}
@@ -103,8 +103,8 @@
 		"memory.max_usage_in_bytes": memoryMaxUsageContents,
 	})
 
-	memory := &memoryGroup{}
-	err := memory.GetStats(helper.CgroupData, &actualStats)
+	memory := &MemoryGroup{}
+	err := memory.GetStats(helper.CgroupPath, &actualStats)
 	if err == nil {
 		t.Fatal("Expected failure")
 	}
@@ -119,8 +119,8 @@
 		"memory.max_usage_in_bytes": "bad",
 	})
 
-	memory := &memoryGroup{}
-	err := memory.GetStats(helper.CgroupData, &actualStats)
+	memory := &MemoryGroup{}
+	err := memory.GetStats(helper.CgroupPath, &actualStats)
 	if err == nil {
 		t.Fatal("Expected failure")
 	}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go
index b834c3e..813274d 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/perf_event.go
@@ -4,21 +4,21 @@
 	"github.com/docker/libcontainer/cgroups"
 )
 
-type perfEventGroup struct {
+type PerfEventGroup struct {
 }
 
-func (s *perfEventGroup) Set(d *data) error {
+func (s *PerfEventGroup) Set(d *data) error {
 	// we just want to join this group even though we don't set anything
-	if _, err := d.join("perf_event"); err != nil && err != cgroups.ErrNotFound {
+	if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
 	return nil
 }
 
-func (s *perfEventGroup) Remove(d *data) error {
+func (s *PerfEventGroup) Remove(d *data) error {
 	return removePath(d.path("perf_event"))
 }
 
-func (s *perfEventGroup) GetStats(d *data, stats *cgroups.Stats) error {
+func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error {
 	return nil
 }
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_test_util.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go
similarity index 100%
rename from vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_test_util.go
rename to vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/test_util.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/util_test.go
similarity index 100%
rename from vendor/src/github.com/docker/libcontainer/cgroups/fs/test_util.go
rename to vendor/src/github.com/docker/libcontainer/cgroups/fs/util_test.go
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
index 2640245..49913bc 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
@@ -55,17 +55,10 @@
 	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive,omitempty"`
 }
 
-// TODO(Vishh): Remove freezer from stats since it does not logically belong in stats.
-type FreezerStats struct {
-	ParentState string `json:"parent_state,omitempty"`
-	SelfState   string `json:"self_state,omitempty"`
-}
-
 type Stats struct {
-	CpuStats     CpuStats     `json:"cpu_stats,omitempty"`
-	MemoryStats  MemoryStats  `json:"memory_stats,omitempty"`
-	BlkioStats   BlkioStats   `json:"blkio_stats,omitempty"`
-	FreezerStats FreezerStats `json:"freezer_stats,omitempty"`
+	CpuStats    CpuStats    `json:"cpu_stats,omitempty"`
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+	BlkioStats  BlkioStats  `json:"blkio_stats,omitempty"`
 }
 
 func NewStats() *Stats {
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
index 6dcfdff..6855910 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
@@ -23,3 +23,7 @@
 func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
 	return fmt.Errorf("Systemd not supported")
 }
+
+func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
+	return nil, fmt.Errorf("Systemd not supported")
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
index 6a0ce95..7af4818 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
@@ -13,24 +13,39 @@
 	"sync"
 	"time"
 
-	systemd1 "github.com/coreos/go-systemd/dbus"
+	systemd "github.com/coreos/go-systemd/dbus"
 	"github.com/docker/libcontainer/cgroups"
-	"github.com/dotcloud/docker/pkg/systemd"
+	"github.com/docker/libcontainer/cgroups/fs"
 	"github.com/godbus/dbus"
 )
 
 type systemdCgroup struct {
-	cleanupDirs []string
+	cgroup *cgroups.Cgroup
+}
+
+type subsystem interface {
+	GetStats(string, *cgroups.Stats) error
 }
 
 var (
 	connLock              sync.Mutex
-	theConn               *systemd1.Conn
+	theConn               *systemd.Conn
 	hasStartTransientUnit bool
+	subsystems            = map[string]subsystem{
+		"devices":    &fs.DevicesGroup{},
+		"memory":     &fs.MemoryGroup{},
+		"cpu":        &fs.CpuGroup{},
+		"cpuset":     &fs.CpusetGroup{},
+		"cpuacct":    &fs.CpuacctGroup{},
+		"blkio":      &fs.BlkioGroup{},
+		"perf_event": &fs.PerfEventGroup{},
+		"freezer":    &fs.FreezerGroup{},
+	}
 )
 
 func UseSystemd() bool {
-	if !systemd.SdBooted() {
+	s, err := os.Stat("/run/systemd/system")
+	if err != nil || !s.IsDir() {
 		return false
 	}
 
@@ -39,7 +54,7 @@
 
 	if theConn == nil {
 		var err error
-		theConn, err = systemd1.New()
+		theConn, err = systemd.New()
 		if err != nil {
 			return false
 		}
@@ -69,286 +84,149 @@
 	return "Unit"
 }
 
-type cgroupArg struct {
-	File  string
-	Value string
-}
-
 func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) {
 	var (
 		unitName   = getUnitName(c)
 		slice      = "system.slice"
-		properties []systemd1.Property
-		cpuArgs    []cgroupArg
-		cpusetArgs []cgroupArg
-		memoryArgs []cgroupArg
-		res        systemdCgroup
+		properties []systemd.Property
+		res        = &systemdCgroup{}
 	)
 
-	// First set up things not supported by systemd
-
-	// -1 disables memorySwap
-	if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) {
-		memorySwap := c.MemorySwap
-
-		if memorySwap == 0 {
-			// By default, MemorySwap is set to twice the size of RAM.
-			memorySwap = c.Memory * 2
-		}
-
-		memoryArgs = append(memoryArgs, cgroupArg{"memory.memsw.limit_in_bytes", strconv.FormatInt(memorySwap, 10)})
-	}
-
-	if c.CpusetCpus != "" {
-		cpusetArgs = append(cpusetArgs, cgroupArg{"cpuset.cpus", c.CpusetCpus})
-	}
+	res.cgroup = c
 
 	if c.Slice != "" {
 		slice = c.Slice
 	}
 
 	properties = append(properties,
-		systemd1.Property{"Slice", dbus.MakeVariant(slice)},
-		systemd1.Property{"Description", dbus.MakeVariant("docker container " + c.Name)},
-		systemd1.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})},
+		systemd.Property{"Slice", dbus.MakeVariant(slice)},
+		systemd.Property{"Description", dbus.MakeVariant("docker container " + c.Name)},
+		systemd.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})},
 	)
 
 	// Always enable accounting, this gets us the same behaviour as the fs implementation,
 	// plus the kernel has some problems with joining the memory cgroup at a later time.
 	properties = append(properties,
-		systemd1.Property{"MemoryAccounting", dbus.MakeVariant(true)},
-		systemd1.Property{"CPUAccounting", dbus.MakeVariant(true)},
-		systemd1.Property{"BlockIOAccounting", dbus.MakeVariant(true)})
+		systemd.Property{"MemoryAccounting", dbus.MakeVariant(true)},
+		systemd.Property{"CPUAccounting", dbus.MakeVariant(true)},
+		systemd.Property{"BlockIOAccounting", dbus.MakeVariant(true)})
 
 	if c.Memory != 0 {
 		properties = append(properties,
-			systemd1.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))})
+			systemd.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))})
 	}
 	// TODO: MemoryReservation and MemorySwap not available in systemd
 
 	if c.CpuShares != 0 {
 		properties = append(properties,
-			systemd1.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))})
+			systemd.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))})
 	}
 
 	if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil {
 		return nil, err
 	}
 
-	// To work around the lack of /dev/pts/* support above we need to manually add these
-	// so, ask systemd for the cgroup used
-	props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName))
-	if err != nil {
-		return nil, err
-	}
-
-	cgroup := props["ControlGroup"].(string)
-
 	if !c.AllowAllDevices {
-		// Atm we can't use the systemd device support because of two missing things:
-		// * Support for wildcards to allow mknod on any device
-		// * Support for wildcards to allow /dev/pts support
-		//
-		// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
-		// in wide use. When both these are availalable we will be able to switch, but need to keep the old
-		// implementation for backwards compat.
-		//
-		// Note: we can't use systemd to set up the initial limits, and then change the cgroup
-		// because systemd will re-write the device settings if it needs to re-apply the cgroup context.
-		// This happens at least for v208 when any sibling unit is started.
-
-		mountpoint, err := cgroups.FindCgroupMountpoint("devices")
-		if err != nil {
+		if err := joinDevices(c, pid); err != nil {
 			return nil, err
 		}
-
-		initPath, err := cgroups.GetInitCgroupDir("devices")
-		if err != nil {
-			return nil, err
-		}
-
-		dir := filepath.Join(mountpoint, initPath, c.Parent, c.Name)
-
-		res.cleanupDirs = append(res.cleanupDirs, dir)
-
-		if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) {
-			return nil, err
-		}
-
-		if err := ioutil.WriteFile(filepath.Join(dir, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil {
-			return nil, err
-		}
-
-		if err := writeFile(dir, "devices.deny", "a"); err != nil {
-			return nil, err
-		}
-
-		for _, dev := range c.AllowedDevices {
-			if err := writeFile(dir, "devices.allow", dev.GetCgroupAllowString()); err != nil {
-				return nil, err
-			}
-		}
 	}
 
-	if len(cpuArgs) != 0 {
-		mountpoint, err := cgroups.FindCgroupMountpoint("cpu")
-		if err != nil {
+	// -1 disables memorySwap
+	if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) {
+		if err := joinMemory(c, pid); err != nil {
 			return nil, err
 		}
 
-		path := filepath.Join(mountpoint, cgroup)
-
-		for _, arg := range cpuArgs {
-			if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	if len(memoryArgs) != 0 {
-		mountpoint, err := cgroups.FindCgroupMountpoint("memory")
-		if err != nil {
-			return nil, err
-		}
-
-		path := filepath.Join(mountpoint, cgroup)
-
-		for _, arg := range memoryArgs {
-			if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil {
-				return nil, err
-			}
-		}
 	}
 
 	// we need to manually join the freezer cgroup in systemd because it does not currently support it
 	// via the dbus api
-	freezerPath, err := joinFreezer(c, pid)
-	if err != nil {
+	if err := joinFreezer(c, pid); err != nil {
 		return nil, err
 	}
-	res.cleanupDirs = append(res.cleanupDirs, freezerPath)
 
-	if len(cpusetArgs) != 0 {
-		// systemd does not atm set up the cpuset controller, so we must manually
-		// join it. Additionally that is a very finicky controller where each
-		// level must have a full setup as the default for a new directory is "no cpus",
-		// so we avoid using any hierarchies here, creating a toplevel directory.
-		mountpoint, err := cgroups.FindCgroupMountpoint("cpuset")
-		if err != nil {
-			return nil, err
-		}
-
-		initPath, err := cgroups.GetInitCgroupDir("cpuset")
-		if err != nil {
-			return nil, err
-		}
-
-		var (
-			foundCpus bool
-			foundMems bool
-
-			rootPath = filepath.Join(mountpoint, initPath)
-			path     = filepath.Join(mountpoint, initPath, c.Parent+"-"+c.Name)
-		)
-
-		res.cleanupDirs = append(res.cleanupDirs, path)
-
-		if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
-			return nil, err
-		}
-
-		for _, arg := range cpusetArgs {
-			if arg.File == "cpuset.cpus" {
-				foundCpus = true
-			}
-			if arg.File == "cpuset.mems" {
-				foundMems = true
-			}
-			if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil {
-				return nil, err
-			}
-		}
-
-		// These are required, if not specified inherit from parent
-		if !foundCpus {
-			s, err := ioutil.ReadFile(filepath.Join(rootPath, "cpuset.cpus"))
-			if err != nil {
-				return nil, err
-			}
-
-			if err := ioutil.WriteFile(filepath.Join(path, "cpuset.cpus"), s, 0700); err != nil {
-				return nil, err
-			}
-		}
-
-		// These are required, if not specified inherit from parent
-		if !foundMems {
-			s, err := ioutil.ReadFile(filepath.Join(rootPath, "cpuset.mems"))
-			if err != nil {
-				return nil, err
-			}
-
-			if err := ioutil.WriteFile(filepath.Join(path, "cpuset.mems"), s, 0700); err != nil {
-				return nil, err
-			}
-		}
-
-		if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil {
+	if c.CpusetCpus != "" {
+		if err := joinCpuset(c, pid); err != nil {
 			return nil, err
 		}
 	}
 
-	return &res, nil
+	return res, nil
 }
 
 func writeFile(dir, file, data string) error {
 	return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
 }
 
+func (c *systemdCgroup) Paths() (map[string]string, error) {
+	paths := make(map[string]string)
+
+	for sysname := range subsystems {
+		subsystemPath, err := getSubsystemPath(c.cgroup, sysname)
+		if err != nil {
+			// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+			if cgroups.IsNotFound(err) {
+				continue
+			}
+
+			return nil, err
+		}
+
+		paths[sysname] = subsystemPath
+	}
+
+	return paths, nil
+}
+
 func (c *systemdCgroup) Cleanup() error {
 	// systemd cleans up the scope unit itself; we just remove manually joined paths
+	paths, err := c.Paths()
+	if err != nil {
+		return err
+	}
 
-	for _, path := range c.cleanupDirs {
+	for _, path := range paths {
 		os.RemoveAll(path)
 	}
 
 	return nil
 }
 
-func joinFreezer(c *cgroups.Cgroup, pid int) (string, error) {
-	path, err := getFreezerPath(c)
+func joinFreezer(c *cgroups.Cgroup, pid int) error {
+	path, err := getSubsystemPath(c, "freezer")
 	if err != nil {
-		return "", err
+		return err
 	}
 
 	if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
-		return "", err
+		return err
 	}
 
-	if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil {
-		return "", err
-	}
-
-	return path, nil
+	return ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700)
 }
 
-func getFreezerPath(c *cgroups.Cgroup) (string, error) {
-	mountpoint, err := cgroups.FindCgroupMountpoint("freezer")
+func getSubsystemPath(c *cgroups.Cgroup, subsystem string) (string, error) {
+	mountpoint, err := cgroups.FindCgroupMountpoint(subsystem)
 	if err != nil {
 		return "", err
 	}
 
-	initPath, err := cgroups.GetInitCgroupDir("freezer")
+	initPath, err := cgroups.GetInitCgroupDir(subsystem)
 	if err != nil {
 		return "", err
 	}
 
-	return filepath.Join(mountpoint, initPath, fmt.Sprintf("%s-%s", c.Parent, c.Name)), nil
+	slice := "system.slice"
+	if c.Slice != "" {
+		slice = c.Slice
+	}
 
+	return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
 }
 
 func Freeze(c *cgroups.Cgroup, state cgroups.FreezerState) error {
-	path, err := getFreezerPath(c)
+	path, err := getSubsystemPath(c, "freezer")
 	if err != nil {
 		return err
 	}
@@ -370,22 +248,111 @@
 }
 
 func GetPids(c *cgroups.Cgroup) ([]int, error) {
-	unitName := getUnitName(c)
-
-	mountpoint, err := cgroups.FindCgroupMountpoint("cpu")
+	path, err := getSubsystemPath(c, "cpu")
 	if err != nil {
 		return nil, err
 	}
 
-	props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName))
-	if err != nil {
-		return nil, err
-	}
-	cgroup := props["ControlGroup"].(string)
-
-	return cgroups.ReadProcsFile(filepath.Join(mountpoint, cgroup))
+	return cgroups.ReadProcsFile(path)
 }
 
 func getUnitName(c *cgroups.Cgroup) string {
 	return fmt.Sprintf("%s-%s.scope", c.Parent, c.Name)
 }
+
+/*
+ * This would be nicer to get from the systemd API when accounting
+ * is enabled, but sadly there is no way to do that yet.
+ * Since the API lacks this functionality, the approach taken
+ * here is guided by
+ * http://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#readingaccountinginformation.
+ */
+func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
+	stats := cgroups.NewStats()
+
+	for sysname, sys := range subsystems {
+		subsystemPath, err := getSubsystemPath(c, sysname)
+		if err != nil {
+			// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
+			if cgroups.IsNotFound(err) {
+				continue
+			}
+
+			return nil, err
+		}
+
+		if err := sys.GetStats(subsystemPath, stats); err != nil {
+			return nil, err
+		}
+	}
+
+	return stats, nil
+}
+
+// At the moment we can't use the systemd device support because of two missing things:
+// * Support for wildcards to allow mknod on any device
+// * Support for wildcards to allow /dev/pts support
+//
+// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
+// in wide use. When both of these are available we will be able to switch, but we need to keep the old
+// implementation for backwards compat.
+//
+// Note: we can't use systemd to set up the initial limits, and then change the cgroup
+// because systemd will re-write the device settings if it needs to re-apply the cgroup context.
+// This happens at least for v208 when any sibling unit is started.
+func joinDevices(c *cgroups.Cgroup, pid int) error {
+	path, err := getSubsystemPath(c, "devices")
+	if err != nil {
+		return err
+	}
+
+	if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil {
+		return err
+	}
+
+	if err := writeFile(path, "devices.deny", "a"); err != nil {
+		return err
+	}
+
+	for _, dev := range c.AllowedDevices {
+		if err := writeFile(path, "devices.allow", dev.GetCgroupAllowString()); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func joinMemory(c *cgroups.Cgroup, pid int) error {
+	memorySwap := c.MemorySwap
+
+	if memorySwap == 0 {
+		// By default, MemorySwap is set to twice the size of RAM.
+		memorySwap = c.Memory * 2
+	}
+
+	path, err := getSubsystemPath(c, "memory")
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(filepath.Join(path, "memory.memsw.limit_in_bytes"), []byte(strconv.FormatInt(memorySwap, 10)), 0700)
+}
+
+// systemd does not currently set up the cpuset controller, so we must join it
+// manually. It is also a very finicky controller: each level must have a full
+// setup, as the default for a new directory is "no cpus"
+func joinCpuset(c *cgroups.Cgroup, pid int) error {
+	path, err := getSubsystemPath(c, "cpuset")
+	if err != nil {
+		return err
+	}
+
+	s := &fs.CpusetGroup{}
+
+	return s.SetDir(path, c.CpusetCpus, pid)
+}
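The systemd backend now mirrors the fs one end to end; a hedged usage sketch using only names defined in this file:

	if systemd.UseSystemd() {
		active, err := systemd.Apply(config, pid) // config is a *cgroups.Cgroup
		if err != nil {
			log.Fatal(err)
		}
		defer active.Cleanup()

		stats, err := systemd.GetStats(config)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("cpu: %+v\n", stats.CpuStats)
	}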
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/utils.go b/vendor/src/github.com/docker/libcontainer/cgroups/utils.go
index 0d19b3e..5516c5a 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/utils.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/utils.go
@@ -4,12 +4,13 @@
 	"bufio"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strconv"
 	"strings"
 
-	"github.com/dotcloud/docker/pkg/mount"
+	"github.com/docker/docker/pkg/mount"
 )
 
 // https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
@@ -28,7 +29,8 @@
 			}
 		}
 	}
-	return "", ErrNotFound
+
+	return "", NewNotFoundError(subsystem)
 }
 
 type Mount struct {
@@ -97,7 +99,7 @@
 		text := s.Text()
 		if text[0] != '#' {
 			parts := strings.Fields(text)
-			if len(parts) > 4 && parts[3] != "0" {
+			if len(parts) >= 4 && parts[3] != "0" {
 				subsystems = append(subsystems, parts[0])
 			}
 		}
@@ -152,17 +154,41 @@
 
 func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
 	s := bufio.NewScanner(r)
+
 	for s.Scan() {
 		if err := s.Err(); err != nil {
 			return "", err
 		}
+
 		text := s.Text()
 		parts := strings.Split(text, ":")
+
 		for _, subs := range strings.Split(parts[1], ",") {
 			if subs == subsystem {
 				return parts[2], nil
 			}
 		}
 	}
-	return "", ErrNotFound
+
+	return "", NewNotFoundError(subsystem)
+}
+
+func pathExists(path string) bool {
+	if _, err := os.Stat(path); err != nil {
+		return false
+	}
+	return true
+}
+
+func EnterPid(cgroupPaths map[string]string, pid int) error {
+	for _, path := range cgroupPaths {
+		if pathExists(path) {
+			if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"),
+				[]byte(strconv.Itoa(pid)), 0700); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
 }
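EnterPid pairs naturally with the new ActiveCgroup.Paths(); a hedged sketch for placing an extra process (say, an exec'd shell) into a running container's cgroups — extraPid is illustrative:

	paths, err := active.Paths() // active is the cgroups.ActiveCgroup returned by Apply
	if err != nil {
		return err
	}
	if err := cgroups.EnterPid(paths, extraPid); err != nil {
		return err
	}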
diff --git a/vendor/src/github.com/docker/libcontainer/config.go b/vendor/src/github.com/docker/libcontainer/config.go
new file mode 100644
index 0000000..8fe95c2
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/config.go
@@ -0,0 +1,86 @@
+package libcontainer
+
+import (
+	"github.com/docker/libcontainer/cgroups"
+	"github.com/docker/libcontainer/mount"
+	"github.com/docker/libcontainer/network"
+)
+
+type MountConfig mount.MountConfig
+
+type Network network.Network
+
+// Config defines configuration options for executing a process inside a contained environment.
+type Config struct {
+	// Mount specific options.
+	MountConfig *MountConfig `json:"mount_config,omitempty"`
+
+	// Hostname optionally sets the container's hostname if provided
+	Hostname string `json:"hostname,omitempty"`
+
+	// User will set the uid and gid of the executing process running inside the container
+	User string `json:"user,omitempty"`
+
+	// WorkingDir will change the process's current working directory inside the container's rootfs
+	WorkingDir string `json:"working_dir,omitempty"`
+
+	// Env will populate the process's environment with the provided values
+	// Any values from the parent process will be cleared before the values
+	// provided in Env are passed to the process
+	Env []string `json:"environment,omitempty"`
+
+	// Tty when true will allocate a pty slave on the host for access by the container's process
+	// and ensure that it is mounted inside the container's rootfs
+	Tty bool `json:"tty,omitempty"`
+
+	// Namespaces specifies the container's namespaces that it should set up when cloning the init process
+	// If a namespace is not provided, that namespace is shared from the container's parent process
+	Namespaces map[string]bool `json:"namespaces,omitempty"`
+
+	// Capabilities specify the capabilities to keep when executing the process inside the container
+	// All capabilities not specified will be dropped from the process's capability mask
+	Capabilities []string `json:"capabilities,omitempty"`
+
+	// Networks specifies the container's network setup to be created
+	Networks []*Network `json:"networks,omitempty"`
+
+	// Routes can be specified to create entries in the route table as the container is started
+	Routes []*Route `json:"routes,omitempty"`
+
+	// Cgroups specifies specific cgroup settings for the various subsystems that the container is
+	// placed into to limit the resources the container has available
+	Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"`
+
+	// AppArmorProfile specifies the profile to apply to the process running in the container and is
+	// changed at the time the process is execed
+	AppArmorProfile string `json:"apparmor_profile,omitempty"`
+
+	// ProcessLabel specifies the label to apply to the process running in the container.  It is
+	// commonly used by SELinux
+	ProcessLabel string `json:"process_label,omitempty"`
+
+	// RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and
+	// /proc/bus
+	RestrictSys bool `json:"restrict_sys,omitempty"`
+}
+
+// Routes can be specified to create entries in the route table as the container is started
+//
+// All of destination, source, and gateway should be either IPv4 or IPv6.
+// One of the three options must be present, and omitted entries will use their
+// IP family default for the route table.  For IPv4, for example, setting the
+// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
+// destination of 0.0.0.0 (or *) when viewed in the route table.
+type Route struct {
+	// Sets the destination and mask, should be a CIDR.  Accepts IPv4 and IPv6
+	Destination string `json:"destination,omitempty"`
+
+	// Sets the source and mask, should be a CIDR.  Accepts IPv4 and IPv6
+	Source string `json:"source,omitempty"`
+
+	// Sets the gateway.  Accepts IPv4 and IPv6
+	Gateway string `json:"gateway,omitempty"`
+
+	// The device to set this route up for, for example: eth0
+	InterfaceName string `json:"interface_name,omitempty"`
+}
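A minimal, hypothetical Config to illustrate the json mapping (the namespace keys and capability names below are assumptions, not values mandated by this file):

	config := &libcontainer.Config{
		Hostname: "demo",
		Namespaces: map[string]bool{
			"NEWNS":  true, // mount namespace; key spelling is an assumption
			"NEWPID": true,
		},
		Capabilities: []string{"CHOWN", "NET_BIND_SERVICE"},
		Cgroups: &cgroups.Cgroup{
			Name:   "demo",
			Parent: "docker",
		},
	}

	out, _ := json.MarshalIndent(config, "", "\t")
	fmt.Println(string(out))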
diff --git a/vendor/src/github.com/docker/libcontainer/container_test.go b/vendor/src/github.com/docker/libcontainer/config_test.go
similarity index 100%
rename from vendor/src/github.com/docker/libcontainer/container_test.go
rename to vendor/src/github.com/docker/libcontainer/config_test.go
diff --git a/vendor/src/github.com/docker/libcontainer/console/console.go b/vendor/src/github.com/docker/libcontainer/console/console.go
index 519b564..346f537 100644
--- a/vendor/src/github.com/docker/libcontainer/console/console.go
+++ b/vendor/src/github.com/docker/libcontainer/console/console.go
@@ -7,22 +7,24 @@
 	"os"
 	"path/filepath"
 	"syscall"
+	"unsafe"
 
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/pkg/system"
 )
 
 // Setup initializes the proper /dev/console inside the rootfs path
 func Setup(rootfs, consolePath, mountLabel string) error {
-	oldMask := system.Umask(0000)
-	defer system.Umask(oldMask)
+	oldMask := syscall.Umask(0000)
+	defer syscall.Umask(oldMask)
 
 	if err := os.Chmod(consolePath, 0600); err != nil {
 		return err
 	}
+
 	if err := os.Chown(consolePath, 0, 0); err != nil {
 		return err
 	}
+
 	if err := label.SetFileLabel(consolePath, mountLabel); err != nil {
 		return fmt.Errorf("set file label %s %s", consolePath, err)
 	}
@@ -33,26 +35,94 @@
 	if err != nil && !os.IsExist(err) {
 		return fmt.Errorf("create %s %s", dest, err)
 	}
+
 	if f != nil {
 		f.Close()
 	}
 
-	if err := system.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil {
+	if err := syscall.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil {
 		return fmt.Errorf("bind %s to %s %s", consolePath, dest, err)
 	}
+
 	return nil
 }
 
 func OpenAndDup(consolePath string) error {
-	slave, err := system.OpenTerminal(consolePath, syscall.O_RDWR)
+	slave, err := OpenTerminal(consolePath, syscall.O_RDWR)
 	if err != nil {
 		return fmt.Errorf("open terminal %s", err)
 	}
-	if err := system.Dup2(slave.Fd(), 0); err != nil {
+
+	if err := syscall.Dup2(int(slave.Fd()), 0); err != nil {
 		return err
 	}
-	if err := system.Dup2(slave.Fd(), 1); err != nil {
+
+	if err := syscall.Dup2(int(slave.Fd()), 1); err != nil {
 		return err
 	}
-	return system.Dup2(slave.Fd(), 2)
+
+	return syscall.Dup2(int(slave.Fd()), 2)
+}
+
+// Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// Unlockpt should be called before opening the slave side of a pseudoterminal.
+func Unlockpt(f *os.File) error {
+	var u int
+
+	return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
+}
+
+// Ptsname retrieves the name of the first available pts for the given master.
+func Ptsname(f *os.File) (string, error) {
+	var n int
+
+	if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("/dev/pts/%d", n), nil
+}
+
+// CreateMasterAndConsole will open /dev/ptmx on the host and retrieve the
+// pts name for use as the pty slave inside the container
+func CreateMasterAndConsole() (*os.File, string, error) {
+	master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
+	if err != nil {
+		return nil, "", err
+	}
+
+	console, err := Ptsname(master)
+	if err != nil {
+		return nil, "", err
+	}
+
+	if err := Unlockpt(master); err != nil {
+		return nil, "", err
+	}
+
+	return master, console, nil
+}
+
+// OpenPtmx opens /dev/ptmx, i.e. the PTY master.
+func OpenPtmx() (*os.File, error) {
+	// O_NOCTTY and O_CLOEXEC are not present in the os package, so we use the syscall constants for all flags.
+	return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
+}
+
+// OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC flag,
+// used to open the pty slave inside the container namespace
+func OpenTerminal(name string, flag int) (*os.File, error) {
+	r, e := syscall.Open(name, flag, 0)
+	if e != nil {
+		return nil, &os.PathError{Op: "open", Path: name, Err: e}
+	}
+	return os.NewFile(uintptr(r), name), nil
+}
+
+func Ioctl(fd uintptr, flag, data uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 {
+		return err
+	}
+
+	return nil
 }
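
Taken together, the new helpers implement the usual pty allocation dance: open /dev/ptmx, query the slave name with TIOCGPTN, and unlock it with TIOCSPTLCK. A minimal host-side sketch, assuming only the functions added above:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/libcontainer/console"
    )

    func main() {
        // Allocate a pty master on the host; consolePath (e.g. /dev/pts/5) is
        // the slave that Setup later bind-mounts to /dev/console in the rootfs.
        master, consolePath, err := console.CreateMasterAndConsole()
        if err != nil {
            log.Fatal(err)
        }
        defer master.Close()
        fmt.Println("console:", consolePath)
    }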
diff --git a/vendor/src/github.com/docker/libcontainer/container.go b/vendor/src/github.com/docker/libcontainer/container.go
index 8fe95c2..c4d372c 100644
--- a/vendor/src/github.com/docker/libcontainer/container.go
+++ b/vendor/src/github.com/docker/libcontainer/container.go
@@ -1,86 +1,71 @@
+/*
+NOTE: The API is in flux and largely unimplemented. Proceed with caution until further notice.
+*/
 package libcontainer
 
-import (
-	"github.com/docker/libcontainer/cgroups"
-	"github.com/docker/libcontainer/mount"
-	"github.com/docker/libcontainer/network"
-)
-
-type MountConfig mount.MountConfig
-
-type Network network.Network
-
-// Config defines configuration options for executing a process inside a contained environment.
-type Config struct {
-	// Mount specific options.
-	MountConfig *MountConfig `json:"mount_config,omitempty"`
-
-	// Hostname optionally sets the container's hostname if provided
-	Hostname string `json:"hostname,omitempty"`
-
-	// User will set the uid and gid of the executing process running inside the container
-	User string `json:"user,omitempty"`
-
-	// WorkingDir will change the processes current working directory inside the container's rootfs
-	WorkingDir string `json:"working_dir,omitempty"`
-
-	// Env will populate the processes environment with the provided values
-	// Any values from the parent processes will be cleared before the values
-	// provided in Env are provided to the process
-	Env []string `json:"environment,omitempty"`
-
-	// Tty when true will allocate a pty slave on the host for access by the container's process
-	// and ensure that it is mounted inside the container's rootfs
-	Tty bool `json:"tty,omitempty"`
-
-	// Namespaces specifies the container's namespaces that it should setup when cloning the init process
-	// If a namespace is not provided that namespace is shared from the container's parent process
-	Namespaces map[string]bool `json:"namespaces,omitempty"`
-
-	// Capabilities specify the capabilities to keep when executing the process inside the container
-	// All capbilities not specified will be dropped from the processes capability mask
-	Capabilities []string `json:"capabilities,omitempty"`
-
-	// Networks specifies the container's network setup to be created
-	Networks []*Network `json:"networks,omitempty"`
-
-	// Routes can be specified to create entries in the route table as the container is started
-	Routes []*Route `json:"routes,omitempty"`
-
-	// Cgroups specifies specific cgroup settings for the various subsystems that the container is
-	// placed into to limit the resources the container has available
-	Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"`
-
-	// AppArmorProfile specifies the profile to apply to the process running in the container and is
-	// change at the time the process is execed
-	AppArmorProfile string `json:"apparmor_profile,omitempty"`
-
-	// ProcessLabel specifies the label to apply to the process running in the container.  It is
-	// commonly used by selinux
-	ProcessLabel string `json:"process_label,omitempty"`
-
-	// RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and
-	// /proc/bus
-	RestrictSys bool `json:"restrict_sys,omitempty"`
-}
-
-// Routes can be specified to create entries in the route table as the container is started
+// Container is a libcontainer container object.
 //
-// All of destination, source, and gateway should be either IPv4 or IPv6.
-// One of the three options must be present, and ommitted entries will use their
-// IP family default for the route table.  For IPv4 for example, setting the
-// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
-// destination of 0.0.0.0(or *) when viewed in the route table.
-type Route struct {
-	// Sets the destination and mask, should be a CIDR.  Accepts IPv4 and IPv6
-	Destination string `json:"destination,omitempty"`
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found.
+type Container interface {
+	// Returns the path to the container which contains the state
+	Path() string
 
-	// Sets the source and mask, should be a CIDR.  Accepts IPv4 and IPv6
-	Source string `json:"source,omitempty"`
+	// Returns the current run state of the container.
+	//
+	// Errors: container no longer exists,
+	//         system error.
+	RunState() (*RunState, error)
 
-	// Sets the gateway.  Accepts IPv4 and IPv6
-	Gateway string `json:"gateway,omitempty"`
+	// Returns the current config of the container.
+	Config() *Config
 
-	// The device to set this route up for, for example: eth0
-	InterfaceName string `json:"interface_name,omitempty"`
+	// Start a process inside the container. Returns the PID of the new process (in the
+	// caller process's namespace) and a channel that will return the exit status of the
+	// process when it dies.
+	//
+	// Errors: container no longer exists,
+	//         config is invalid,
+	//         container is paused,
+	//         system error.
+	Start(*ProcessConfig) (pid int, exitChan chan int, err error)
+
+	// Destroys the container after killing all running processes.
+	//
+	// Any event registrations are removed before the container is destroyed.
+	// No error is returned if the container is already destroyed.
+	//
+	// Errors: system error.
+	Destroy() error
+
+	// Returns the PIDs inside this container. The PIDs are in the namespace of the calling process.
+	//
+	// Errors: container no longer exists,
+	//         system error.
+	//
+	// Some of the returned PIDs may no longer refer to processes in the Container, unless
+	// the Container state is PAUSED, in which case every PID in the slice is valid.
+	Processes() ([]int, error)
+
+	// Returns statistics for the container.
+	//
+	// Errors: container no longer exists,
+	//         system error.
+	Stats() (*ContainerStats, error)
+
+	// If the Container state is RUNNING or PAUSING, sets the Container state to PAUSING and pauses
+	// the execution of any user processes. Asynchronously, when the container has finished being paused the
+	// state is changed to PAUSED.
+	// If the Container state is PAUSED, do nothing.
+	//
+	// Errors: container no longer exists,
+	//         system error.
+	Pause() error
+
+	// If the Container state is PAUSED, resumes the execution of any user processes in the
+	// Container before setting the Container state to RUNNING.
+	// If the Container state is RUNNING, do nothing.
+	//
+	// Errors: container no longer exists,
+	//         system error.
+	Resume() error
 }
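
Since the API is declared to be in flux, the following is only a sketch of the intended call pattern; it assumes an already-obtained Container value and that ProcessConfig (not shown in this diff) carries the argv to run:

    // runAndWait is a hypothetical helper: start a process in the container
    // and block until its exit status arrives on the channel.
    func runAndWait(c libcontainer.Container, cfg *libcontainer.ProcessConfig) (int, error) {
        pid, exitChan, err := c.Start(cfg)
        if err != nil {
            return -1, err // e.g. container no longer exists, or is paused
        }
        log.Printf("started pid %d", pid)
        return <-exitChan, nil
    }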
diff --git a/vendor/src/github.com/docker/libcontainer/devices/defaults.go b/vendor/src/github.com/docker/libcontainer/devices/defaults.go
index 393c438..e0ad0b0 100644
--- a/vendor/src/github.com/docker/libcontainer/devices/defaults.go
+++ b/vendor/src/github.com/docker/libcontainer/devices/defaults.go
@@ -146,8 +146,8 @@
 			// /dev/fuse is created but not allowed.
 			// This is to allow java to work.  Because java
 			// Insists on there being a /dev/fuse
-			// https://github.com/dotcloud/docker/issues/514
-			// https://github.com/dotcloud/docker/issues/2393
+			// https://github.com/docker/docker/issues/514
+			// https://github.com/docker/docker/issues/2393
 			//
 			Path:              "/dev/fuse",
 			Type:              'c',
diff --git a/vendor/src/github.com/docker/libcontainer/factory.go b/vendor/src/github.com/docker/libcontainer/factory.go
new file mode 100644
index 0000000..9161ff0
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/factory.go
@@ -0,0 +1,25 @@
+package libcontainer
+
+type Factory interface {
+	// Creates a new container in the given path. A unique ID is generated for the
+	// container, and the initial process is started inside the container.
+	//
+	// Returns the new container with a running process.
+	//
+	// Errors:
+	// Path already exists
+	// Config is invalid
+	// System error
+	//
+	// On error, any partially created container parts are cleaned up (the operation is atomic).
+	Create(path string, config *Config) (Container, error)
+
+	// Load takes the path for an existing container and reconstructs the container
+	// from the state.
+	//
+	// Errors:
+	// Path does not exist
+	// Container is stopped
+	// System error
+	Load(path string) (Container, error)
+}
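
No Factory implementation ships in this diff, so the following is only a sketch of the documented contract; the fallback to Load relies on Create failing (and cleaning up after itself) when the path already exists:

    // createOrLoad is a hypothetical helper built on the Factory contract:
    // if Create fails because the path exists, reconstruct the container
    // from its saved state instead.
    func createOrLoad(f libcontainer.Factory, path string, cfg *libcontainer.Config) (libcontainer.Container, error) {
        if c, err := f.Create(path, cfg); err == nil {
            return c, nil
        }
        return f.Load(path)
    }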
diff --git a/vendor/src/github.com/docker/libcontainer/label/label.go b/vendor/src/github.com/docker/libcontainer/label/label.go
index 434e1c5..73869b3 100644
--- a/vendor/src/github.com/docker/libcontainer/label/label.go
+++ b/vendor/src/github.com/docker/libcontainer/label/label.go
@@ -2,6 +2,13 @@
 
 package label
 
+// InitLabels returns the process label and file labels to be used within
+// the container.  A list of options can be passed into this function to alter
+// the labels.
+func InitLabels(options []string) (string, string, error) {
+	return "", "", nil
+}
+
 func GenLabels(options string) (string, string, error) {
 	return "", "", nil
 }
@@ -18,7 +25,11 @@
 	return nil
 }
 
-func GetPidCon(pid int) (string, error) {
+func Relabel(path string, fileLabel string, relabel string) error {
+	return nil
+}
+
+func GetPidLabel(pid int) (string, error) {
 	return "", nil
 }
 
diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
index 0452144..5b1380a 100644
--- a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
+++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
@@ -9,30 +9,49 @@
 	"github.com/docker/libcontainer/selinux"
 )
 
-func GenLabels(options string) (string, string, error) {
+// InitLabels returns the process label and file labels to be used within
+// the container.  A list of options can be passed into this function to alter
+// the labels.  The labels returned will include a random MCS string that is
+// guaranteed to be unique.
+func InitLabels(options []string) (string, string, error) {
 	if !selinux.SelinuxEnabled() {
 		return "", "", nil
 	}
 	var err error
 	processLabel, mountLabel := selinux.GetLxcContexts()
 	if processLabel != "" {
-		var (
-			s = strings.Fields(options)
-			l = len(s)
-		)
-		if l > 0 {
-			pcon := selinux.NewContext(processLabel)
-			for i := 0; i < l; i++ {
-				o := strings.Split(s[i], "=")
-				pcon[o[0]] = o[1]
+		pcon := selinux.NewContext(processLabel)
+		mcon := selinux.NewContext(mountLabel)
+		for _, opt := range options {
+			if opt == "disable" {
+				return "", "", nil
 			}
-			processLabel = pcon.Get()
-			mountLabel, err = selinux.CopyLevel(processLabel, mountLabel)
+			if i := strings.Index(opt, ":"); i == -1 {
+				return "", "", fmt.Errorf("Bad SELinux Option")
+			}
+			con := strings.SplitN(opt, ":", 2)
+			pcon[con[0]] = con[1]
+			if con[0] == "level" || con[0] == "user" {
+				mcon[con[0]] = con[1]
+			}
 		}
+		processLabel = pcon.Get()
+		mountLabel = mcon.Get()
 	}
 	return processLabel, mountLabel, err
 }
 
+// DEPRECATED: The GenLabels function is only to be used during the transition to the official API.
+func GenLabels(options string) (string, string, error) {
+	return InitLabels(strings.Fields(options))
+}
+
+// FormatMountLabel returns a string to be used by the mount command.
+// The format of this string will be used to alter the labeling of the mountpoint.
+// The string returned is suitable to be used as the options field of the mount command.
+// If you need to have additional mount point options, you can pass them in as
+// the first parameter.  Second parameter is the label that you wish to apply
+// to all content in the mount point.
 func FormatMountLabel(src, mountLabel string) string {
 	if mountLabel != "" {
 		switch src {
@@ -45,6 +64,8 @@
 	return src
 }
 
+// SetProcessLabel takes a process label and tells the kernel to assign the
+// label to the next program executed by the current process.
 func SetProcessLabel(processLabel string) error {
 	if selinux.SelinuxEnabled() {
 		return selinux.Setexeccon(processLabel)
@@ -52,6 +73,9 @@
 	return nil
 }
 
+// GetProcessLabel returns the process label that the kernel will assign
+// to the next program executed by the current process.  If "" is returned
+// this indicates that the default labeling will happen for the process.
 func GetProcessLabel() (string, error) {
 	if selinux.SelinuxEnabled() {
 		return selinux.Getexeccon()
@@ -59,6 +83,7 @@
 	return "", nil
 }
 
+// SetFileLabel modifies the "path" label to the specified file label
 func SetFileLabel(path string, fileLabel string) error {
 	if selinux.SelinuxEnabled() && fileLabel != "" {
 		return selinux.Setfilecon(path, fileLabel)
@@ -66,17 +91,39 @@
 	return nil
 }
 
-func GetPidCon(pid int) (string, error) {
+// Relabel changes the label of path to the fileLabel string.  If the relabel
+// string is "z", Relabel will change the MCS label to s0.  This will allow all
+// containers to share the content.  If the relabel string is "Z" then
+// the MCS label should continue to be used.  SELinux will use this label
+// to make sure the content cannot be shared by other containers.
+func Relabel(path string, fileLabel string, relabel string) error {
+	if fileLabel == "" {
+		return nil
+	}
+	if relabel == "z" {
+		c := selinux.NewContext(fileLabel)
+		c["level"] = "s0"
+		fileLabel = c.Get()
+	}
+	return selinux.Chcon(path, fileLabel, true)
+}
+
+// GetPidLabel will return the label of the process running with the specified pid
+func GetPidLabel(pid int) (string, error) {
 	if !selinux.SelinuxEnabled() {
 		return "", nil
 	}
 	return selinux.Getpidcon(pid)
 }
 
+// Init initializes the labeling system
 func Init() {
 	selinux.SelinuxEnabled()
 }
 
+// ReserveLabel will record the fact that the MCS label has already been used.
+// This will prevent InitLabels from using the MCS label in a newly created
+// container.
 func ReserveLabel(label string) error {
 	selinux.ReserveLabel(label)
 	return nil
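
The new test file below exercises InitLabels directly; FormatMountLabel has no test here, so as a rough illustration of its documented use (the exact output format is an assumption based on the doc comment, not shown in this hunk):

    // Sketch: compose tmpfs options with an SELinux mount label. The result
    // is suitable for the data argument of mount(2); expected to look like
    //   mode=755,context="system_u:object_r:svirt_sandbox_file_t:s0:c1,c2"
    opts := label.FormatMountLabel("mode=755", "system_u:object_r:svirt_sandbox_file_t:s0:c1,c2")
    fmt.Println(opts)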
diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go
new file mode 100644
index 0000000..c83654f
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go
@@ -0,0 +1,48 @@
+// +build selinux,linux
+
+package label
+
+import (
+	"testing"
+
+	"github.com/docker/libcontainer/selinux"
+)
+
+func TestInit(t *testing.T) {
+	if selinux.SelinuxEnabled() {
+		var testNull []string
+		plabel, mlabel, err := InitLabels(testNull)
+		if err != nil {
+			t.Log("InitLabels Failed")
+			t.Fatal(err)
+		}
+		testDisabled := []string{"disable"}
+		plabel, mlabel, err = InitLabels(testDisabled)
+		if err != nil {
+			t.Log("InitLabels Disabled Failed")
+			t.Fatal(err)
+		}
+		if plabel != "" {
+			t.Log("InitLabels Disabled Failed")
+			t.Fatal()
+		}
+		testUser := []string{"user:user_u", "role:user_r", "type:user_t", "level:s0:c1,c15"}
+		plabel, mlabel, err = InitLabels(testUser)
+		if err != nil {
+			t.Log("InitLabels User Failed")
+			t.Fatal(err)
+		}
+		if plabel != "user_u:user_r:user_t:s0:c1,c15" || mlabel != "user_u:object_r:svirt_sandbox_file_t:s0:c1,c15" {
+			t.Log("InitLabels User Failed")
+			t.Log(plabel, mlabel)
+			t.Fatal(err)
+		}
+
+		testBadData := []string{"user", "role:user_r", "type:user_t", "level:s0:c1,c15"}
+		plabel, mlabel, err = InitLabels(testBadData)
+		if err == nil {
+			t.Log("InitLabels Bad Failed")
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/mount/init.go b/vendor/src/github.com/docker/libcontainer/mount/init.go
index 34fad6d..05ab334 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/init.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/init.go
@@ -8,10 +8,9 @@
 	"path/filepath"
 	"syscall"
 
+	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/libcontainer/label"
 	"github.com/docker/libcontainer/mount/nodes"
-	"github.com/dotcloud/docker/pkg/symlink"
-	"github.com/dotcloud/docker/pkg/system"
 )
 
 // default mount point flags
@@ -27,7 +26,7 @@
 
 // InitializeMountNamespace sets up the devices, mount points, and filesystems for use inside a
 // new mount namespace.
-func InitializeMountNamespace(rootfs, console string, mountConfig *MountConfig) error {
+func InitializeMountNamespace(rootfs, console string, sysReadonly bool, mountConfig *MountConfig) error {
 	var (
 		err  error
 		flag = syscall.MS_PRIVATE
@@ -35,16 +34,16 @@
 	if mountConfig.NoPivotRoot {
 		flag = syscall.MS_SLAVE
 	}
-	if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
+	if err := syscall.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
 		return fmt.Errorf("mounting / with flags %X %s", (flag | syscall.MS_REC), err)
 	}
-	if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
+	if err := syscall.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
 		return fmt.Errorf("mouting %s as bind %s", rootfs, err)
 	}
-	if err := mountSystem(rootfs, mountConfig); err != nil {
+	if err := mountSystem(rootfs, sysReadonly, mountConfig); err != nil {
 		return fmt.Errorf("mount system %s", err)
 	}
-	if err := setupBindmounts(rootfs, mountConfig.Mounts); err != nil {
+	if err := setupBindmounts(rootfs, mountConfig); err != nil {
 		return fmt.Errorf("bind mounts %s", err)
 	}
 	if err := nodes.CreateDeviceNodes(rootfs, mountConfig.DeviceNodes); err != nil {
@@ -53,10 +52,18 @@
 	if err := SetupPtmx(rootfs, console, mountConfig.MountLabel); err != nil {
 		return err
 	}
+
+	// stdin, stdout and stderr could be pointing to /dev/null from parent namespace.
+	// Re-open them inside this namespace.
+	if err := reOpenDevNull(rootfs); err != nil {
+		return fmt.Errorf("Failed to reopen /dev/null %s", err)
+	}
+
 	if err := setupDevSymlinks(rootfs); err != nil {
 		return fmt.Errorf("dev symlinks %s", err)
 	}
-	if err := system.Chdir(rootfs); err != nil {
+
+	if err := syscall.Chdir(rootfs); err != nil {
 		return fmt.Errorf("chdir into %s %s", rootfs, err)
 	}
 
@@ -75,19 +82,19 @@
 		}
 	}
 
-	system.Umask(0022)
+	syscall.Umask(0022)
 
 	return nil
 }
 
 // mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
 // inside the mount namespace
-func mountSystem(rootfs string, mountConfig *MountConfig) error {
-	for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, mountConfig.Mounts) {
+func mountSystem(rootfs string, sysReadonly bool, mountConfig *MountConfig) error {
+	for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, sysReadonly, mountConfig.Mounts) {
 		if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
 			return fmt.Errorf("mkdirall %s %s", m.path, err)
 		}
-		if err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {
+		if err := syscall.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {
 			return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err)
 		}
 	}
@@ -144,7 +151,8 @@
 	return nil
 }
 
-func setupBindmounts(rootfs string, bindMounts Mounts) error {
+func setupBindmounts(rootfs string, mountConfig *MountConfig) error {
+	bindMounts := mountConfig.Mounts
 	for _, m := range bindMounts.OfType("bind") {
 		var (
 			flags = syscall.MS_BIND | syscall.MS_REC
@@ -168,16 +176,21 @@
 			return fmt.Errorf("Creating new bind-mount target, %s", err)
 		}
 
-		if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
+		if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
 			return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err)
 		}
 		if !m.Writable {
-			if err := system.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil {
+			if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil {
 				return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err)
 			}
 		}
+		if m.Relabel != "" {
+			if err := label.Relabel(m.Source, mountConfig.MountLabel, m.Relabel); err != nil {
+				return fmt.Errorf("relabeling %s to %s %s", m.Source, mountConfig.MountLabel, err)
+			}
+		}
 		if m.Private {
-			if err := system.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
+			if err := syscall.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
 				return fmt.Errorf("mounting %s private %s", dest, err)
 			}
 		}
@@ -187,14 +200,45 @@
 
 // TODO: this is crappy right now and should be cleaned up with a better way of handling system and
 // standard bind mounts allowing them to be more dynamic
-func newSystemMounts(rootfs, mountLabel string, mounts Mounts) []mount {
+func newSystemMounts(rootfs, mountLabel string, sysReadonly bool, mounts Mounts) []mount {
 	systemMounts := []mount{
 		{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
-		{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags},
 		{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)},
 		{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)},
 		{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)},
 	}
 
+	sysMountFlags := defaultMountFlags
+	if sysReadonly {
+		sysMountFlags |= syscall.MS_RDONLY
+	}
+	systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: sysMountFlags})
+
 	return systemMounts
 }
+
+// If stdin, stdout or stderr are pointing to '/dev/null',
+// this function will make them point to '/dev/null' from within this namespace.
+func reOpenDevNull(rootfs string) error {
+	var stat, devNullStat syscall.Stat_t
+	file, err := os.Open(filepath.Join(rootfs, "/dev/null"))
+	if err != nil {
+		return fmt.Errorf("Failed to open /dev/null - %s", err)
+	}
+	defer file.Close()
+	if err = syscall.Fstat(int(file.Fd()), &devNullStat); err != nil {
+		return fmt.Errorf("Failed to stat /dev/null - %s", err)
+	}
+	for fd := 0; fd < 3; fd++ {
+		if err = syscall.Fstat(fd, &stat); err != nil {
+			return fmt.Errorf("Failed to stat fd %d - %s", fd, err)
+		}
+		if stat.Rdev == devNullStat.Rdev {
+			// Close and re-open the fd.
+			if err = syscall.Dup2(int(file.Fd()), fd); err != nil {
+				return fmt.Errorf("Failed to dup fd %d to fd %d - %s", file.Fd(), fd, err)
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/mount/msmoveroot.go b/vendor/src/github.com/docker/libcontainer/mount/msmoveroot.go
index b336c86..94afd3a 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/msmoveroot.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/msmoveroot.go
@@ -4,16 +4,17 @@
 
 import (
 	"fmt"
-	"github.com/dotcloud/docker/pkg/system"
 	"syscall"
 )
 
 func MsMoveRoot(rootfs string) error {
-	if err := system.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
+	if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
 		return fmt.Errorf("mount move %s into / %s", rootfs, err)
 	}
-	if err := system.Chroot("."); err != nil {
+
+	if err := syscall.Chroot("."); err != nil {
 		return fmt.Errorf("chroot . %s", err)
 	}
-	return system.Chdir("/")
+
+	return syscall.Chdir("/")
 }
diff --git a/vendor/src/github.com/docker/libcontainer/mount/nodes/nodes.go b/vendor/src/github.com/docker/libcontainer/mount/nodes/nodes.go
index e3420b4..6a984e3 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/nodes/nodes.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/nodes/nodes.go
@@ -9,13 +9,12 @@
 	"syscall"
 
 	"github.com/docker/libcontainer/devices"
-	"github.com/dotcloud/docker/pkg/system"
 )
 
 // Create the device nodes in the container.
 func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error {
-	oldMask := system.Umask(0000)
-	defer system.Umask(oldMask)
+	oldMask := syscall.Umask(0000)
+	defer syscall.Umask(oldMask)
 
 	for _, node := range nodesToCreate {
 		if err := CreateDeviceNode(rootfs, node); err != nil {
@@ -46,7 +45,7 @@
 		return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
 	}
 
-	if err := system.Mknod(dest, uint32(fileMode), devices.Mkdev(node.MajorNumber, node.MinorNumber)); err != nil && !os.IsExist(err) {
+	if err := syscall.Mknod(dest, uint32(fileMode), devices.Mkdev(node.MajorNumber, node.MinorNumber)); err != nil && !os.IsExist(err) {
 		return fmt.Errorf("mknod %s %s", node.Path, err)
 	}
 	return nil
diff --git a/vendor/src/github.com/docker/libcontainer/mount/pivotroot.go b/vendor/src/github.com/docker/libcontainer/mount/pivotroot.go
index ffd6051..a88ed4a 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/pivotroot.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/pivotroot.go
@@ -8,8 +8,6 @@
 	"os"
 	"path/filepath"
 	"syscall"
-
-	"github.com/dotcloud/docker/pkg/system"
 )
 
 func PivotRoot(rootfs string) error {
@@ -17,16 +15,20 @@
 	if err != nil {
 		return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err)
 	}
-	if err := system.Pivotroot(rootfs, pivotDir); err != nil {
+
+	if err := syscall.PivotRoot(rootfs, pivotDir); err != nil {
 		return fmt.Errorf("pivot_root %s", err)
 	}
-	if err := system.Chdir("/"); err != nil {
+
+	if err := syscall.Chdir("/"); err != nil {
 		return fmt.Errorf("chdir / %s", err)
 	}
+
 	// path to pivot dir now changed, update
 	pivotDir = filepath.Join("/", filepath.Base(pivotDir))
-	if err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
+	if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
 		return fmt.Errorf("unmount pivot_root dir %s", err)
 	}
+
 	return os.Remove(pivotDir)
 }
diff --git a/vendor/src/github.com/docker/libcontainer/mount/ptmx.go b/vendor/src/github.com/docker/libcontainer/mount/ptmx.go
index 32c0252..c316481 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/ptmx.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/ptmx.go
@@ -4,9 +4,10 @@
 
 import (
 	"fmt"
-	"github.com/docker/libcontainer/console"
 	"os"
 	"path/filepath"
+
+	"github.com/docker/libcontainer/console"
 )
 
 func SetupPtmx(rootfs, consolePath, mountLabel string) error {
@@ -14,13 +15,16 @@
 	if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
 		return err
 	}
+
 	if err := os.Symlink("pts/ptmx", ptmx); err != nil {
 		return fmt.Errorf("symlink dev ptmx %s", err)
 	}
+
 	if consolePath != "" {
 		if err := console.Setup(rootfs, consolePath, mountLabel); err != nil {
 			return err
 		}
 	}
+
 	return nil
 }
diff --git a/vendor/src/github.com/docker/libcontainer/mount/readonly.go b/vendor/src/github.com/docker/libcontainer/mount/readonly.go
index 0658358..9b4a6f7 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/readonly.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/readonly.go
@@ -3,10 +3,9 @@
 package mount
 
 import (
-	"github.com/dotcloud/docker/pkg/system"
 	"syscall"
 )
 
 func SetReadonly() error {
-	return system.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "")
+	return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "")
 }
diff --git a/vendor/src/github.com/docker/libcontainer/mount/remount.go b/vendor/src/github.com/docker/libcontainer/mount/remount.go
index 3e00509..99a0120 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/remount.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/remount.go
@@ -2,30 +2,30 @@
 
 package mount
 
-import (
-	"github.com/dotcloud/docker/pkg/system"
-	"syscall"
-)
+import "syscall"
 
 func RemountProc() error {
-	if err := system.Unmount("/proc", syscall.MNT_DETACH); err != nil {
+	if err := syscall.Unmount("/proc", syscall.MNT_DETACH); err != nil {
 		return err
 	}
-	if err := system.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil {
+
+	if err := syscall.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil {
 		return err
 	}
+
 	return nil
 }
 
 func RemountSys() error {
-	if err := system.Unmount("/sys", syscall.MNT_DETACH); err != nil {
+	if err := syscall.Unmount("/sys", syscall.MNT_DETACH); err != nil {
 		if err != syscall.EINVAL {
 			return err
 		}
 	} else {
-		if err := system.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil {
+		if err := syscall.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil {
 			return err
 		}
 	}
+
 	return nil
 }
diff --git a/vendor/src/github.com/docker/libcontainer/mount/types.go b/vendor/src/github.com/docker/libcontainer/mount/types.go
index a2659e5..063bbac 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/types.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/types.go
@@ -30,6 +30,7 @@
 	Source      string `json:"source,omitempty"`      // Source path, in the host namespace
 	Destination string `json:"destination,omitempty"` // Destination path, in the container
 	Writable    bool   `json:"writable,omitempty"`
+	Relabel     string `json:"relabel,omitempty"` // Relabel source if set, "z" indicates shared, "Z" indicates unshared
 	Private     bool   `json:"private,omitempty"`
 }
 
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go
index 0aa2bb9..382abfb 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go
@@ -3,6 +3,7 @@
 package namespaces
 
 import (
+	"io"
 	"os"
 	"os/exec"
 	"syscall"
@@ -12,42 +13,34 @@
 	"github.com/docker/libcontainer/cgroups/fs"
 	"github.com/docker/libcontainer/cgroups/systemd"
 	"github.com/docker/libcontainer/network"
-	"github.com/dotcloud/docker/pkg/system"
+	"github.com/docker/libcontainer/syncpipe"
+	"github.com/docker/libcontainer/system"
 )
 
 // TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
 // Move this to libcontainer package.
 // Exec performs setup outside of a namespace so that a container can be
 // executed.  Exec is a high level function for working with container namespaces.
-func Exec(container *libcontainer.Config, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
+func Exec(container *libcontainer.Config, stdin io.Reader, stdout, stderr io.Writer, console string, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
 	var (
-		master  *os.File
-		console string
-		err     error
+		err error
 	)
 
 	// create a pipe so that we can synchronize with the namespaced process and
 	// pass the veth name to the child
-	syncPipe, err := NewSyncPipe()
+	syncPipe, err := syncpipe.NewSyncPipe()
 	if err != nil {
 		return -1, err
 	}
 	defer syncPipe.Close()
 
-	if container.Tty {
-		master, console, err = system.CreateMasterAndConsole()
-		if err != nil {
-			return -1, err
-		}
-		term.SetMaster(master)
-	}
-
-	command := createCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.child, args)
-
-	if err := term.Attach(command); err != nil {
-		return -1, err
-	}
-	defer term.Close()
+	command := createCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.Child(), args)
+	// Note: these are only used in non-tty mode.
+	// If there is a tty for the container it will be opened within the namespace and the
+	// fds will be duped to stdin, stdout, and stderr
+	command.Stdin = stdin
+	command.Stdout = stdout
+	command.Stderr = stderr
 
 	if err := command.Start(); err != nil {
 		return -1, err
@@ -63,14 +56,19 @@
 
 	// Do this before syncing with child so that no children
 	// can escape the cgroup
-	cleaner, err := SetupCgroups(container, command.Process.Pid)
+	cgroupRef, err := SetupCgroups(container, command.Process.Pid)
 	if err != nil {
 		command.Process.Kill()
 		command.Wait()
 		return -1, err
 	}
-	if cleaner != nil {
-		defer cleaner.Cleanup()
+	defer cgroupRef.Cleanup()
+
+	cgroupPaths, err := cgroupRef.Paths()
+	if err != nil {
+		command.Process.Kill()
+		command.Wait()
+		return -1, err
 	}
 
 	var networkState network.NetworkState
@@ -84,6 +82,7 @@
 		InitPid:       command.Process.Pid,
 		InitStartTime: started,
 		NetworkState:  networkState,
+		CgroupPaths:   cgroupPaths,
 	}
 
 	if err := libcontainer.SaveState(dataPath, state); err != nil {
@@ -109,6 +108,7 @@
 			return -1, err
 		}
 	}
+
 	return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
 }
 
@@ -139,12 +139,16 @@
 	   }
 	*/
 
-	command := exec.Command(init, append([]string{"init"}, args...)...)
+	command := exec.Command(init, append([]string{"init", "--"}, args...)...)
 	// make sure the process is executed inside the context of the rootfs
 	command.Dir = rootfs
 	command.Env = append(os.Environ(), env...)
 
-	system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces)))
+	if command.SysProcAttr == nil {
+		command.SysProcAttr = &syscall.SysProcAttr{}
+	}
+	command.SysProcAttr.Cloneflags = uintptr(GetNamespaceFlags(container.Namespaces))
+
 	command.SysProcAttr.Pdeathsig = syscall.SIGKILL
 	command.ExtraFiles = []*os.File{pipe}
 
@@ -156,17 +160,20 @@
 func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) {
 	if container.Cgroups != nil {
 		c := container.Cgroups
+
 		if systemd.UseSystemd() {
 			return systemd.Apply(c, nspid)
 		}
+
 		return fs.Apply(c, nspid)
 	}
+
 	return nil, nil
 }
 
 // InitializeNetworking creates the container's network stack outside of the namespace and moves
 // interfaces into the container's net namespaces if necessary
-func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *SyncPipe, networkState *network.NetworkState) error {
+func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *syncpipe.SyncPipe, networkState *network.NetworkState) error {
 	for _, config := range container.Networks {
 		strategy, err := network.GetStrategy(config.Type)
 		if err != nil {
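
The switch from system.SetCloneFlags to a direct SysProcAttr assignment is the stock Go pattern for requesting new namespaces at clone(2) time. A self-contained, Linux-only sketch (the child command is a placeholder):

    package main

    import (
        "os"
        "os/exec"
        "syscall"
    )

    func main() {
        cmd := exec.Command("/proc/self/exe", "init")
        // Ask clone(2) for fresh namespaces for the child, mirroring what the
        // exec path above now does with GetNamespaceFlags.
        cmd.SysProcAttr = &syscall.SysProcAttr{
            Cloneflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWPID,
            Pdeathsig:  syscall.SIGKILL, // kill the child if this process dies
        }
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
        _ = cmd.Run() // needs CAP_SYS_ADMIN; illustration only
    }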
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go
index d349282..8b81ede 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go
@@ -3,40 +3,95 @@
 package namespaces
 
 import (
-	"encoding/json"
+	"fmt"
+	"io"
 	"os"
+	"os/exec"
+	"path/filepath"
 	"strconv"
+	"syscall"
 
 	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/label"
-	"github.com/dotcloud/docker/pkg/system"
+	"github.com/docker/libcontainer/syncpipe"
+	"github.com/docker/libcontainer/system"
 )
 
-// ExecIn uses an existing pid and joins the pid's namespaces with the new command.
-func ExecIn(container *libcontainer.Config, state *libcontainer.State, args []string) error {
-	// TODO(vmarmol): If this gets too long, send it over a pipe to the child.
-	// Marshall the container into JSON since it won't be available in the namespace.
-	containerJson, err := json.Marshal(container)
-	if err != nil {
-		return err
+// ExecIn re-execs initPath with argv[0] rewritten to "nsenter" so that it is able to run the
+// setns code in a single-threaded environment, joining the existing container's namespaces.
+func ExecIn(container *libcontainer.Config, state *libcontainer.State, userArgs []string, initPath, action string,
+	stdin io.Reader, stdout, stderr io.Writer, console string, startCallback func(*exec.Cmd)) (int, error) {
+
+	args := []string{fmt.Sprintf("nsenter-%s", action), "--nspid", strconv.Itoa(state.InitPid)}
+
+	if console != "" {
+		args = append(args, "--console", console)
 	}
 
-	// Enter the namespace and then finish setup
-	finalArgs := []string{os.Args[0], "nsenter", "--nspid", strconv.Itoa(state.InitPid), "--containerjson", string(containerJson), "--"}
-	finalArgs = append(finalArgs, args...)
-	if err := system.Execv(finalArgs[0], finalArgs[0:], os.Environ()); err != nil {
-		return err
+	cmd := &exec.Cmd{
+		Path: initPath,
+		Args: append(args, append([]string{"--"}, userArgs...)...),
 	}
-	panic("unreachable")
+
+	if filepath.Base(initPath) == initPath {
+		if lp, err := exec.LookPath(initPath); err == nil {
+			cmd.Path = lp
+		}
+	}
+
+	pipe, err := syncpipe.NewSyncPipe()
+	if err != nil {
+		return -1, err
+	}
+	defer pipe.Close()
+
+	// Note: these are only used in non-tty mode.
+	// If there is a tty for the container it will be opened within the namespace and the
+	// fds will be duped to stdin, stdout, and stderr
+	cmd.Stdin = stdin
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+
+	cmd.ExtraFiles = []*os.File{pipe.Child()}
+
+	if err := cmd.Start(); err != nil {
+		return -1, err
+	}
+	pipe.CloseChild()
+
+	// Enter cgroups.
+	if err := EnterCgroups(state, cmd.Process.Pid); err != nil {
+		return -1, err
+	}
+
+	if err := pipe.SendToChild(container); err != nil {
+		cmd.Process.Kill()
+		cmd.Wait()
+		return -1, err
+	}
+
+	if startCallback != nil {
+		startCallback(cmd)
+	}
+
+	if err := cmd.Wait(); err != nil {
+		if _, ok := err.(*exec.ExitError); !ok {
+			return -1, err
+		}
+	}
+
+	return cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
 }
 
-// NsEnter is run after entering the namespace.
-func NsEnter(container *libcontainer.Config, nspid int, args []string) error {
-	// clear the current processes env and replace it with the environment
-	// defined on the container
+// FinalizeSetns expects that the setns calls have been set up and that the process has joined an
+// existing namespace
+func FinalizeSetns(container *libcontainer.Config, args []string) error {
+	// clear the current processes env and replace it with the environment defined on the container
 	if err := LoadContainerEnvironment(container); err != nil {
 		return err
 	}
+
 	if err := FinalizeNamespace(container); err != nil {
 		return err
 	}
@@ -50,5 +105,10 @@
 	if err := system.Execv(args[0], args[0:], container.Env); err != nil {
 		return err
 	}
+
 	panic("unreachable")
 }
+
+func EnterCgroups(state *libcontainer.State, pid int) error {
+	return cgroups.EnterPid(state.CgroupPaths, pid)
+}
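
The easy-to-miss trick in ExecIn is the argv[0] rewrite: the same init binary is re-executed, but with Args[0] set to "nsenter-<action>" so that the C constructor in the nsenter package (added below) runs its setns logic before the Go runtime boots. A stripped-down sketch, with a hypothetical "exec" action and user command:

    // Sketch: Path is the real binary; Args[0] is deliberately not the binary
    // name but the "nsenter-" prefixed action that the C constructor keys on.
    cmd := &exec.Cmd{
        Path: initPath,
        Args: []string{"nsenter-exec", "--nspid", strconv.Itoa(state.InitPid), "--", "/bin/sh"},
    }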
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/init.go b/vendor/src/github.com/docker/libcontainer/namespaces/init.go
index 53d2611..4c2b332 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/init.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/init.go
@@ -5,7 +5,6 @@
 import (
 	"fmt"
 	"os"
-	"runtime"
 	"strings"
 	"syscall"
 
@@ -18,16 +17,19 @@
 	"github.com/docker/libcontainer/network"
 	"github.com/docker/libcontainer/security/capabilities"
 	"github.com/docker/libcontainer/security/restrict"
+	"github.com/docker/libcontainer/syncpipe"
+	"github.com/docker/libcontainer/system"
+	"github.com/docker/libcontainer/user"
 	"github.com/docker/libcontainer/utils"
-	"github.com/dotcloud/docker/pkg/system"
-	"github.com/dotcloud/docker/pkg/user"
 )
 
 // TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
 // Move this to libcontainer package.
 // Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
 // and other options required for the new container.
-func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) (err error) {
+// The caller of the Init function has to ensure that the Go runtime is locked to an OS thread
+// (using runtime.LockOSThread); otherwise system calls like setns made within Init may not work as intended.
+func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *syncpipe.SyncPipe, args []string) (err error) {
 	defer func() {
 		if err != nil {
 			syncPipe.ReportChildError(err)
@@ -46,8 +48,8 @@
 	}
 
 	// We always read this as it is a way to sync with the parent as well
-	networkState, err := syncPipe.ReadFromParent()
-	if err != nil {
+	var networkState *network.NetworkState
+	if err := syncPipe.ReadFromParent(&networkState); err != nil {
 		return err
 	}
 
@@ -56,7 +58,7 @@
 			return err
 		}
 	}
-	if _, err := system.Setsid(); err != nil {
+	if _, err := syscall.Setsid(); err != nil {
 		return fmt.Errorf("setsid %s", err)
 	}
 	if consolePath != "" {
@@ -75,18 +77,17 @@
 
 	if err := mount.InitializeMountNamespace(rootfs,
 		consolePath,
+		container.RestrictSys,
 		(*mount.MountConfig)(container.MountConfig)); err != nil {
 		return fmt.Errorf("setup mount namespace %s", err)
 	}
 
 	if container.Hostname != "" {
-		if err := system.Sethostname(container.Hostname); err != nil {
+		if err := syscall.Sethostname([]byte(container.Hostname)); err != nil {
 			return fmt.Errorf("sethostname %s", err)
 		}
 	}
 
-	runtime.LockOSThread()
-
 	if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil {
 		return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err)
 	}
@@ -97,7 +98,7 @@
 
 	// TODO: (crosbymichael) make this configurable at the Config level
 	if container.RestrictSys {
-		if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus", "sys"); err != nil {
+		if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus"); err != nil {
 			return err
 		}
 	}
@@ -117,7 +118,7 @@
 		return fmt.Errorf("restore parent death signal %s", err)
 	}
 
-	return system.Execv(args[0], args[0:], container.Env)
+	return system.Execv(args[0], args[0:], os.Environ())
 }
 
 // RestoreParentDeathSignal sets the parent death signal to old.
@@ -150,19 +151,30 @@
 
 // SetupUser changes the groups, gid, and uid for the user inside the container
 func SetupUser(u string) error {
-	uid, gid, suppGids, err := user.GetUserGroupSupplementary(u, syscall.Getuid(), syscall.Getgid())
+	uid, gid, suppGids, home, err := user.GetUserGroupSupplementaryHome(u, syscall.Getuid(), syscall.Getgid(), "/")
 	if err != nil {
 		return fmt.Errorf("get supplementary groups %s", err)
 	}
-	if err := system.Setgroups(suppGids); err != nil {
+
+	if err := syscall.Setgroups(suppGids); err != nil {
 		return fmt.Errorf("setgroups %s", err)
 	}
-	if err := system.Setgid(gid); err != nil {
+
+	if err := syscall.Setgid(gid); err != nil {
 		return fmt.Errorf("setgid %s", err)
 	}
-	if err := system.Setuid(uid); err != nil {
+
+	if err := syscall.Setuid(uid); err != nil {
 		return fmt.Errorf("setuid %s", err)
 	}
+
+	// if we didn't get HOME already, set it based on the user's HOME
+	if envHome := os.Getenv("HOME"); envHome == "" {
+		if err := os.Setenv("HOME", home); err != nil {
+			return fmt.Errorf("set HOME %s", err)
+		}
+	}
+
 	return nil
 }
 
@@ -228,7 +240,7 @@
 	}
 
 	if container.WorkingDir != "" {
-		if err := system.Chdir(container.WorkingDir); err != nil {
+		if err := syscall.Chdir(container.WorkingDir); err != nil {
 			return fmt.Errorf("chdir to %s %s", container.WorkingDir, err)
 		}
 	}
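
With runtime.LockOSThread removed from Init itself, every caller must pin the goroutine before calling in, per the new doc comment. A minimal sketch of a compliant caller (the wrapper name is hypothetical):

    func lockedInit(container *libcontainer.Config, rootfs, console string, pipe *syncpipe.SyncPipe, args []string) error {
        // Init issues thread-scoped syscalls (setns, setsid, setuid/setgid),
        // so the goroutine must stay pinned to one OS thread throughout.
        runtime.LockOSThread()
        defer runtime.UnlockOSThread()
        return namespaces.Init(container, rootfs, console, pipe, args)
    }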
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter.go b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter.go
deleted file mode 100644
index d5eaa27..0000000
--- a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// +build linux
-
-package namespaces
-
-/*
-#include <dirent.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/limits.h>
-#include <linux/sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <getopt.h>
-
-static const kBufSize = 256;
-
-void get_args(int *argc, char ***argv) {
-	// Read argv
-	int fd = open("/proc/self/cmdline", O_RDONLY);
-
-	// Read the whole commandline.
-	ssize_t contents_size = 0;
-	ssize_t contents_offset = 0;
-	char *contents = NULL;
-	ssize_t bytes_read = 0;
-	do {
-		contents_size += kBufSize;
-		contents = (char *) realloc(contents, contents_size);
-		bytes_read = read(fd, contents + contents_offset, contents_size - contents_offset);
-		contents_offset += bytes_read;
-	} while (bytes_read > 0);
-	close(fd);
-
-	// Parse the commandline into an argv. /proc/self/cmdline has \0 delimited args.
-	ssize_t i;
-	*argc = 0;
-	for (i = 0; i < contents_offset; i++) {
-		if (contents[i] == '\0') {
-			(*argc)++;
-		}
-	}
-	*argv = (char **) malloc(sizeof(char *) * ((*argc) + 1));
-	int idx;
-	for (idx = 0; idx < (*argc); idx++) {
-		(*argv)[idx] = contents;
-		contents += strlen(contents) + 1;
-	}
-	(*argv)[*argc] = NULL;
-}
-
-// Use raw setns syscall for versions of glibc that don't include it (namely glibc-2.12)
-#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
-#define _GNU_SOURCE
-#include <sched.h>
-#include "syscall.h"
-#ifdef SYS_setns
-int setns(int fd, int nstype) {
-  return syscall(SYS_setns, fd, nstype);
-}
-#endif
-#endif
-
-void print_usage() {
-	fprintf(stderr, "<binary> nsenter --nspid <pid> --containerjson <container_json> -- cmd1 arg1 arg2...\n");
-}
-
-void nsenter() {
-	int argc;
-	char **argv;
-	get_args(&argc, &argv);
-
-	// Ignore if this is not for us.
-	if (argc < 2 || strcmp(argv[1], "nsenter") != 0) {
-		return;
-	}
-
-	// USAGE: <binary> nsenter <PID> <process label> <container JSON> <argv>...
-	if (argc < 6) {
-		fprintf(stderr, "nsenter: Incorrect usage, not enough arguments\n");
-		exit(1);
-	}
-
-	static const struct option longopts[] = {
-		{ "nspid",         required_argument, NULL, 'n' },
-		{ "containerjson", required_argument, NULL, 'c' },
-		{ NULL,            0,                 NULL,  0  }
-	};
-
-	int c;
-	pid_t init_pid = -1;
-	char *init_pid_str = NULL;
-	char *container_json = NULL;
-	while ((c = getopt_long_only(argc, argv, "n:s:c:", longopts, NULL)) != -1) {
-		switch (c) {
-		case 'n':
-			init_pid_str = optarg;
-			break;
-		case 'c':
-			container_json = optarg;
-			break;
-		}
-	}
-
-	if (container_json == NULL || init_pid_str == NULL) {
-		print_usage();
-		exit(1);
-	}
-
-	init_pid = strtol(init_pid_str, NULL, 10);
-	if (errno != 0 || init_pid <= 0) {
-		fprintf(stderr, "nsenter: Failed to parse PID from \"%s\" with error: \"%s\"\n", init_pid_str, strerror(errno));
-		print_usage();
-		exit(1);
-	}
-
-	argc -= 3;
-	argv += 3;
-
-	// Setns on all supported namespaces.
-	char ns_dir[PATH_MAX];
-	memset(ns_dir, 0, PATH_MAX);
-	snprintf(ns_dir, PATH_MAX - 1, "/proc/%d/ns/", init_pid);
-	struct dirent *dent;
-	DIR *dir = opendir(ns_dir);
-	if (dir == NULL) {
-		fprintf(stderr, "nsenter: Failed to open directory \"%s\" with error: \"%s\"\n", ns_dir, strerror(errno));
-		exit(1);
-	}
-
-	while((dent = readdir(dir)) != NULL) {
-		if(strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0 || strcmp(dent->d_name, "user") == 0) {
-			continue;
-		}
-
-		// Get and open the namespace for the init we are joining..
-		char buf[PATH_MAX];
-		memset(buf, 0, PATH_MAX);
-		snprintf(buf, PATH_MAX - 1, "%s%s", ns_dir, dent->d_name);
-		int fd = open(buf, O_RDONLY);
-		if (fd == -1) {
-			fprintf(stderr, "nsenter: Failed to open ns file \"%s\" for ns \"%s\" with error: \"%s\"\n", buf, dent->d_name, strerror(errno));
-			exit(1);
-		}
-
-		// Set the namespace.
-		if (setns(fd, 0) == -1) {
-			fprintf(stderr, "nsenter: Failed to setns for \"%s\" with error: \"%s\"\n", dent->d_name, strerror(errno));
-			exit(1);
-		}
-		close(fd);
-	}
-	closedir(dir);
-
-	// We must fork to actually enter the PID namespace.
-	int child = fork();
-	if (child == 0) {
-		// Finish executing, let the Go runtime take over.
-		return;
-	} else {
-		// Parent, wait for the child.
-		int status = 0;
-		if (waitpid(child, &status, 0) == -1) {
-			fprintf(stderr, "nsenter: Failed to waitpid with error: \"%s\"\n", strerror(errno));
-			exit(1);
-		}
-
-		// Forward the child's exit code or re-send its death signal.
-		if (WIFEXITED(status)) {
-			exit(WEXITSTATUS(status));
-		} else if (WIFSIGNALED(status)) {
-			kill(getpid(), WTERMSIG(status));
-		}
-		exit(1);
-	}
-
-	return;
-}
-
-__attribute__((constructor)) init() {
-	nsenter();
-}
-*/
-import "C"
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/README.md b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/README.md
new file mode 100644
index 0000000..ac94cba
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/README.md
@@ -0,0 +1,6 @@
+## nsenter
+
+The `nsenter` package registers a special init constructor that is called before the Go runtime has
+a chance to boot.  This gives us the ability to `setns` on existing namespaces and avoid the issues
+that the Go runtime has with multiple threads.  This constructor is only called if the package is
+imported in your Go application and argv[0] starts with `nsenter`.
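
Concretely, "registered" here means a blank import; a sketch of the pattern the README describes (the package path is taken from this diff, the rest is illustrative):

    package main

    import (
        "fmt"
        "os"
        "strings"

        // The blank import pulls in the C constructor, which runs before the
        // Go runtime boots and performs setns when argv[0] starts with "nsenter".
        _ "github.com/docker/libcontainer/namespaces/nsenter"
    )

    func main() {
        if strings.HasPrefix(os.Args[0], "nsenter") {
            // Already inside the joined namespaces by the time main runs.
            fmt.Println("inside container namespaces")
            return
        }
        // ... normal application startup ...
    }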
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c
new file mode 100644
index 0000000..2869dd1
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.c
@@ -0,0 +1,218 @@
+// +build cgo
+//
+// formatted with indent -linux nsenter.c
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/limits.h>
+#include <linux/sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <getopt.h>
+
+static const int kBufSize = 256;
+static const char *kNsEnter = "nsenter";
+
+void get_args(int *argc, char ***argv)
+{
+	// Read argv
+	int fd = open("/proc/self/cmdline", O_RDONLY);
+
+	// Read the whole commandline.
+	ssize_t contents_size = 0;
+	ssize_t contents_offset = 0;
+	char *contents = NULL;
+	ssize_t bytes_read = 0;
+	do {
+		contents_size += kBufSize;
+		contents = (char *)realloc(contents, contents_size);
+		bytes_read =
+			read(fd, contents + contents_offset,
+			     contents_size - contents_offset);
+		contents_offset += bytes_read;
+	}
+	while (bytes_read > 0);
+	close(fd);
+
+	// Parse the commandline into an argv. /proc/self/cmdline has \0 delimited args.
+	ssize_t i;
+	*argc = 0;
+	for (i = 0; i < contents_offset; i++) {
+		if (contents[i] == '\0') {
+			(*argc)++;
+		}
+	}
+	*argv = (char **)malloc(sizeof(char *) * ((*argc) + 1));
+	int idx;
+	for (idx = 0; idx < (*argc); idx++) {
+		(*argv)[idx] = contents;
+		contents += strlen(contents) + 1;
+	}
+	(*argv)[*argc] = NULL;
+}
+
+// Use raw setns syscall for versions of glibc that don't include it (namely glibc-2.12)
+#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 14
+#define _GNU_SOURCE
+#include <sched.h>
+#include "syscall.h"
+#ifdef SYS_setns
+int setns(int fd, int nstype)
+{
+	return syscall(SYS_setns, fd, nstype);
+}
+#endif
+#endif
+
+void print_usage()
+{
+	fprintf(stderr,
+		"nsenter --nspid <pid> --console <console> -- cmd1 arg1 arg2...\n");
+}
+
+void nsenter()
+{
+	int argc, c;
+	char **argv;
+	get_args(&argc, &argv);
+
+	// check argv 0 to ensure that we are supposed to setns
+	// we use strncmp to test for a prefix of "nsenter", which also allows alternate implementations
+	// after the setns code path to continue to use argv 0 to determine the actions to be run,
+	// resulting in the ability to specify "nsenter-mknod", "nsenter-exec", etc...
+	if (strncmp(argv[0], kNsEnter, strlen(kNsEnter)) != 0) {
+		return;
+	}
+
+	static const struct option longopts[] = {
+		{"nspid", required_argument, NULL, 'n'},
+		{"console", required_argument, NULL, 't'},
+		{NULL, 0, NULL, 0}
+	};
+
+	pid_t init_pid = -1;
+	char *init_pid_str = NULL;
+	char *console = NULL;
+	while ((c = getopt_long_only(argc, argv, "n:c:", longopts, NULL)) != -1) {
+		switch (c) {
+		case 'n':
+			init_pid_str = optarg;
+			break;
+		case 't':
+			console = optarg;
+			break;
+		}
+	}
+
+	if (init_pid_str == NULL) {
+		print_usage();
+		exit(1);
+	}
+
+	init_pid = strtol(init_pid_str, NULL, 10);
+	if ((init_pid == 0 && errno == EINVAL) || errno == ERANGE) {
+		fprintf(stderr,
+			"nsenter: Failed to parse PID from \"%s\" with output \"%d\" and error: \"%s\"\n",
+			init_pid_str, init_pid, strerror(errno));
+		print_usage();
+		exit(1);
+	}
+
+	argc -= 3;
+	argv += 3;
+
+	if (setsid() == -1) {
+		fprintf(stderr, "setsid failed. Error: %s\n", strerror(errno));
+		exit(1);
+	}
+	// before we setns we need to dup the console
+	int consolefd = -1;
+	if (console != NULL) {
+		consolefd = open(console, O_RDWR);
+		if (consolefd < 0) {
+			fprintf(stderr,
+				"nsenter: failed to open console %s %s\n",
+				console, strerror(errno));
+			exit(1);
+		}
+	}
+	// Setns on all supported namespaces.
+	char ns_dir[PATH_MAX];
+	memset(ns_dir, 0, PATH_MAX);
+	snprintf(ns_dir, PATH_MAX - 1, "/proc/%d/ns/", init_pid);
+
+	char *namespaces[] = { "ipc", "uts", "net", "pid", "mnt" };
+	const int num = sizeof(namespaces) / sizeof(char *);
+	int i;
+	for (i = 0; i < num; i++) {
+		char buf[PATH_MAX];
+		memset(buf, 0, PATH_MAX);
+		snprintf(buf, PATH_MAX - 1, "%s%s", ns_dir, namespaces[i]);
+		int fd = open(buf, O_RDONLY);
+		if (fd == -1) {
+			// Ignore nonexistent namespaces.
+			if (errno == ENOENT)
+				continue;
+
+			fprintf(stderr,
+				"nsenter: Failed to open ns file \"%s\" for ns \"%s\" with error: \"%s\"\n",
+				buf, namespaces[i], strerror(errno));
+			exit(1);
+		}
+		// Set the namespace.
+		if (setns(fd, 0) == -1) {
+			fprintf(stderr,
+				"nsenter: Failed to setns for \"%s\" with error: \"%s\"\n",
+				namespaces[i], strerror(errno));
+			exit(1);
+		}
+		close(fd);
+	}
+
+	// We must fork to actually enter the PID namespace.
+	int child = fork();
+	if (child == 0) {
+		if (consolefd != -1) {
+			if (dup2(consolefd, STDIN_FILENO) != 0) {
+				fprintf(stderr, "nsenter: failed to dup 0 %s\n",
+					strerror(errno));
+				exit(1);
+			}
+			if (dup2(consolefd, STDOUT_FILENO) != STDOUT_FILENO) {
+				fprintf(stderr, "nsenter: failed to dup 1 %s\n",
+					strerror(errno));
+				exit(1);
+			}
+			if (dup2(consolefd, STDERR_FILENO) != STDERR_FILENO) {
+				fprintf(stderr, "nsenter: failed to dup 2 %s\n",
+					strerror(errno));
+				exit(1);
+			}
+		}
+		// Finish executing, let the Go runtime take over.
+		return;
+	} else {
+		// Parent, wait for the child.
+		int status = 0;
+		if (waitpid(child, &status, 0) == -1) {
+			fprintf(stderr,
+				"nsenter: Failed to waitpid with error: \"%s\"\n",
+				strerror(errno));
+			exit(1);
+		}
+		// Forward the child's exit code or re-send its death signal.
+		if (WIFEXITED(status)) {
+			exit(WEXITSTATUS(status));
+		} else if (WIFSIGNALED(status)) {
+			kill(getpid(), WTERMSIG(status));
+		}
+
+		exit(1);
+	}
+
+	return;
+}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.go b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.go
new file mode 100644
index 0000000..7d21e8e
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter.go
@@ -0,0 +1,10 @@
+// +build linux
+
+package nsenter
+
+/*
+__attribute__((constructor)) void init(void) {
+	nsenter();
+}
+*/
+import "C"
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter_unsupported.go b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter_unsupported.go
new file mode 100644
index 0000000..2459c63
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/nsenter/nsenter_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package nsenter
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/std_term.go b/vendor/src/github.com/docker/libcontainer/namespaces/std_term.go
deleted file mode 100644
index 324336a..0000000
--- a/vendor/src/github.com/docker/libcontainer/namespaces/std_term.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package namespaces
-
-import (
-	"io"
-	"os"
-	"os/exec"
-)
-
-type StdTerminal struct {
-	stdin          io.Reader
-	stdout, stderr io.Writer
-}
-
-func (s *StdTerminal) SetMaster(*os.File) {
-	// no need to set master on non tty
-}
-
-func (s *StdTerminal) Close() error {
-	return nil
-}
-
-func (s *StdTerminal) Resize(h, w int) error {
-	return nil
-}
-
-func (s *StdTerminal) Attach(command *exec.Cmd) error {
-	inPipe, err := command.StdinPipe()
-	if err != nil {
-		return err
-	}
-	outPipe, err := command.StdoutPipe()
-	if err != nil {
-		return err
-	}
-	errPipe, err := command.StderrPipe()
-	if err != nil {
-		return err
-	}
-
-	go func() {
-		defer inPipe.Close()
-		io.Copy(inPipe, s.stdin)
-	}()
-
-	go io.Copy(s.stdout, outPipe)
-	go io.Copy(s.stderr, errPipe)
-
-	return nil
-}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/term.go b/vendor/src/github.com/docker/libcontainer/namespaces/term.go
deleted file mode 100644
index 2a50bf8..0000000
--- a/vendor/src/github.com/docker/libcontainer/namespaces/term.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package namespaces
-
-import (
-	"io"
-	"os"
-	"os/exec"
-)
-
-type Terminal interface {
-	io.Closer
-	SetMaster(*os.File)
-	Attach(*exec.Cmd) error
-	Resize(h, w int) error
-}
-
-func NewTerminal(stdin io.Reader, stdout, stderr io.Writer, tty bool) Terminal {
-	if tty {
-		return &TtyTerminal{
-			stdin:  stdin,
-			stdout: stdout,
-			stderr: stderr,
-		}
-	}
-	return &StdTerminal{
-		stdin:  stdin,
-		stdout: stdout,
-		stderr: stderr,
-	}
-}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/tty_term.go b/vendor/src/github.com/docker/libcontainer/namespaces/tty_term.go
deleted file mode 100644
index 272cf2c..0000000
--- a/vendor/src/github.com/docker/libcontainer/namespaces/tty_term.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package namespaces
-
-import (
-	"io"
-	"os"
-	"os/exec"
-
-	"github.com/dotcloud/docker/pkg/term"
-)
-
-type TtyTerminal struct {
-	stdin          io.Reader
-	stdout, stderr io.Writer
-	master         *os.File
-	state          *term.State
-}
-
-func (t *TtyTerminal) Resize(h, w int) error {
-	return term.SetWinsize(t.master.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
-}
-
-func (t *TtyTerminal) SetMaster(master *os.File) {
-	t.master = master
-}
-
-func (t *TtyTerminal) Attach(command *exec.Cmd) error {
-	go io.Copy(t.stdout, t.master)
-	go io.Copy(t.master, t.stdin)
-
-	state, err := t.setupWindow(t.master, os.Stdin)
-
-	if err != nil {
-		return err
-	}
-
-	t.state = state
-	return err
-}
-
-// SetupWindow gets the parent window size and sets the master
-// pty to the current size and set the parents mode to RAW
-func (t *TtyTerminal) setupWindow(master, parent *os.File) (*term.State, error) {
-	ws, err := term.GetWinsize(parent.Fd())
-	if err != nil {
-		return nil, err
-	}
-	if err := term.SetWinsize(master.Fd(), ws); err != nil {
-		return nil, err
-	}
-	return term.SetRawTerminal(parent.Fd())
-}
-
-func (t *TtyTerminal) Close() error {
-	term.RestoreTerminal(os.Stdin.Fd(), t.state)
-	return t.master.Close()
-}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/unsupported.go b/vendor/src/github.com/docker/libcontainer/namespaces/unsupported.go
deleted file mode 100644
index 8398b94..0000000
--- a/vendor/src/github.com/docker/libcontainer/namespaces/unsupported.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build !linux
-
-package namespaces
-
-import (
-	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/cgroups"
-)
-
-func Exec(container *libcontainer.Config, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
-	return -1, ErrUnsupported
-}
-
-func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error {
-	return ErrUnsupported
-}
-
-func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *SyncPipe) error {
-	return ErrUnsupported
-}
-
-func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) {
-	return nil, ErrUnsupported
-}
-
-func GetNamespaceFlags(namespaces map[string]bool) (flag int) {
-	return 0
-}
diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink.go
index 5cc7562..dd9b1c1 100644
--- a/vendor/src/github.com/docker/libcontainer/netlink/netlink.go
+++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink.go
@@ -21,3 +21,10 @@
 	Iface   *net.Interface
 	Default bool
 }
+
+// An IfAddr defines IP network settings for a given network interface
+type IfAddr struct {
+	Iface *net.Interface
+	IP    net.IP
+	IPNet *net.IPNet
+}
diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go
index 14e30aa..215fb17 100644
--- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux.go
@@ -1,11 +1,8 @@
-// +build amd64
-
 package netlink
 
 import (
 	"encoding/binary"
 	"fmt"
-	"math/rand"
 	"net"
 	"sync/atomic"
 	"syscall"
@@ -20,21 +17,27 @@
 	VETH_INFO_PEER = 1
 	IFLA_NET_NS_FD = 28
 	SIOC_BRADDBR   = 0x89a0
+	SIOC_BRDELBR   = 0x89a1
 	SIOC_BRADDIF   = 0x89a2
 )
 
 var nextSeqNr uint32
 
 type ifreqHwaddr struct {
-	IfrnName   [16]byte
+	IfrnName   [IFNAMSIZ]byte
 	IfruHwaddr syscall.RawSockaddr
 }
 
 type ifreqIndex struct {
-	IfrnName  [16]byte
+	IfrnName  [IFNAMSIZ]byte
 	IfruIndex int32
 }
 
+type ifreqFlags struct {
+	IfrnName  [IFNAMSIZ]byte
+	Ifruflags uint16
+}
+
 func nativeEndian() binary.ByteOrder {
 	var x uint32 = 0x01020304
 	if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
@@ -186,13 +189,15 @@
 }
 
 func (a *RtAttr) Len() int {
+	if len(a.children) == 0 {
+		return (syscall.SizeofRtAttr + len(a.Data))
+	}
+
 	l := 0
 	for _, child := range a.children {
-		l += child.Len() + syscall.SizeofRtAttr
+		l += child.Len()
 	}
-	if l == 0 {
-		l++
-	}
+	l += syscall.SizeofRtAttr
 	return rtaAlignOf(l + len(a.Data))
 }
 
@@ -200,7 +205,7 @@
 	native := nativeEndian()
 
 	length := a.Len()
-	buf := make([]byte, rtaAlignOf(length+syscall.SizeofRtAttr))
+	buf := make([]byte, rtaAlignOf(length))
 
 	if a.Data != nil {
 		copy(buf[4:], a.Data)
@@ -213,11 +218,10 @@
 		}
 	}
 
-	if l := uint16(rtaAlignOf(length)); l != 0 {
-		native.PutUint16(buf[0:2], l+1)
+	if l := uint16(length); l != 0 {
+		native.PutUint16(buf[0:2], l)
 	}
 	native.PutUint16(buf[2:4], a.Type)
-
 	return buf
 }
 
@@ -647,30 +651,28 @@
 	return s.HandleAck(wb.Seq)
 }
 
-// Add an Ip address to an interface. This is identical to:
-// ip addr add $ip/$ipNet dev $iface
-func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
+func networkLinkIpAction(action, flags int, ifa IfAddr) error {
 	s, err := getNetlinkSocket()
 	if err != nil {
 		return err
 	}
 	defer s.Close()
 
-	family := getIpFamily(ip)
+	family := getIpFamily(ifa.IP)
 
-	wb := newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+	wb := newNetlinkRequest(action, flags)
 
 	msg := newIfAddrmsg(family)
-	msg.Index = uint32(iface.Index)
-	prefixLen, _ := ipNet.Mask.Size()
+	msg.Index = uint32(ifa.Iface.Index)
+	prefixLen, _ := ifa.IPNet.Mask.Size()
 	msg.Prefixlen = uint8(prefixLen)
 	wb.AddData(msg)
 
 	var ipData []byte
 	if family == syscall.AF_INET {
-		ipData = ip.To4()
+		ipData = ifa.IP.To4()
 	} else {
-		ipData = ip.To16()
+		ipData = ifa.IP.To16()
 	}
 
 	localData := newRtAttr(syscall.IFA_LOCAL, ipData)
@@ -686,6 +688,26 @@
 	return s.HandleAck(wb.Seq)
 }
 
+// Delete an IP address from an interface. This is identical to:
+// ip addr del $ip/$ipNet dev $iface
+func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
+	return networkLinkIpAction(
+		syscall.RTM_DELADDR,
+		syscall.NLM_F_ACK,
+		IfAddr{iface, ip, ipNet},
+	)
+}
+
+// Add an Ip address to an interface. This is identical to:
+// ip addr add $ip/$ipNet dev $iface
+func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
+	return networkLinkIpAction(
+		syscall.RTM_NEWADDR,
+		syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK,
+		IfAddr{iface, ip, ipNet},
+	)
+}
+
 func zeroTerminated(s string) []byte {
 	return []byte(s + "\000")
 }
@@ -697,6 +719,10 @@
 // Add a new network link of a specified type. This is identical to
 // running: ip add link $name type $linkType
 func NetworkLinkAdd(name string, linkType string) error {
+	if name == "" || linkType == "" {
+		return fmt.Errorf("Neither link name nor link type can be empty!")
+	}
+
 	s, err := getNetlinkSocket()
 	if err != nil {
 		return err
@@ -708,15 +734,43 @@
 	msg := newIfInfomsg(syscall.AF_UNSPEC)
 	wb.AddData(msg)
 
-	if name != "" {
-		nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name))
-		wb.AddData(nameData)
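+	// IFLA_LINKINFO nests an IFLA_INFO_KIND attribute naming the requested
+	// link type, e.g. "bridge" or "veth".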
+	linkInfo := newRtAttr(syscall.IFLA_LINKINFO, nil)
+	newRtAttrChild(linkInfo, IFLA_INFO_KIND, nonZeroTerminated(linkType))
+	wb.AddData(linkInfo)
+
+	nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name))
+	wb.AddData(nameData)
+
+	if err := s.Send(wb); err != nil {
+		return err
 	}
 
-	kindData := newRtAttr(IFLA_INFO_KIND, nonZeroTerminated(linkType))
+	return s.HandleAck(wb.Seq)
+}
 
-	infoData := newRtAttr(syscall.IFLA_LINKINFO, kindData.ToWireFormat())
-	wb.AddData(infoData)
+// Delete a network link. This is identical to
+// running: ip link del $name
+func NetworkLinkDel(name string) error {
+	if name == "" {
+		return fmt.Errorf("Network link name can not be empty!")
+	}
+
+	s, err := getNetlinkSocket()
+	if err != nil {
+		return err
+	}
+	defer s.Close()
+
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return err
+	}
+
+	wb := newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
+
+	msg := newIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(iface.Index)
+	wb.AddData(msg)
 
 	if err := s.Send(wb); err != nil {
 		return err
@@ -846,6 +900,10 @@
 }
 
 func NetworkChangeName(iface *net.Interface, newName string) error {
+	if len(newName) >= IFNAMSIZ {
+		return fmt.Errorf("Interface name %s too long", newName)
+	}
+
 	fd, err := getIfSocket()
 	if err != nil {
 		return err
@@ -898,13 +956,13 @@
 // Create the actual bridge device.  This is more backward-compatible than
 // netlink.NetworkLinkAdd and works on RHEL 6.
 func CreateBridge(name string, setMacAddr bool) error {
-	s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
+	if len(name) >= IFNAMSIZ {
+		return fmt.Errorf("Interface name %s too long", name)
+	}
+
+	s, err := getIfSocket()
 	if err != nil {
-		// ipv6 issue, creating with ipv4
-		s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
-		if err != nil {
-			return err
-		}
+		return err
 	}
 	defer syscall.Close(s)
 
@@ -921,21 +979,48 @@
 	return nil
 }
 
+// Delete the actual bridge device.
+func DeleteBridge(name string) error {
+	s, err := getIfSocket()
+	if err != nil {
+		return err
+	}
+	defer syscall.Close(s)
+
+	nameBytePtr, err := syscall.BytePtrFromString(name)
+	if err != nil {
+		return err
+	}
+
+	var ifr ifreqFlags
+	copy(ifr.IfrnName[:len(ifr.IfrnName)-1], []byte(name))
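+	// With Ifruflags left zero, SIOCSIFFLAGS brings the bridge down first;
+	// the kernel refuses to delete a bridge that is still up.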
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s),
+		syscall.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifr))); err != 0 {
+		return err
+	}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s),
+		SIOC_BRDELBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 {
+		return err
+	}
+	return nil
+}
+
 // Add a slave to a bridge device.  This is more backward-compatible than
 // netlink.NetworkSetMaster and works on RHEL 6.
 func AddToBridge(iface, master *net.Interface) error {
-	s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
+	if len(master.Name) >= IFNAMSIZ {
+		return fmt.Errorf("Interface name %s too long", master.Name)
+	}
+
+	s, err := getIfSocket()
 	if err != nil {
-		// ipv6 issue, creating with ipv4
-		s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
-		if err != nil {
-			return err
-		}
+		return err
 	}
 	defer syscall.Close(s)
 
 	ifr := ifreqIndex{}
-	copy(ifr.IfrnName[:], master.Name)
+	copy(ifr.IfrnName[:len(ifr.IfrnName)-1], master.Name)
 	ifr.IfruIndex = int32(iface.Index)
 
 	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDIF, uintptr(unsafe.Pointer(&ifr))); err != 0 {
@@ -946,12 +1031,16 @@
 }
 
 func setBridgeMacAddress(s int, name string) error {
+	if len(name) >= IFNAMSIZ {
+		return fmt.Errorf("Interface name %s too long", name)
+	}
+
 	ifr := ifreqHwaddr{}
 	ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER
-	copy(ifr.IfrnName[:], name)
+	copy(ifr.IfrnName[:len(ifr.IfrnName)-1], name)
 
 	for i := 0; i < 6; i++ {
-		ifr.IfruHwaddr.Data[i] = int8(rand.Intn(255))
+		ifr.IfruHwaddr.Data[i] = randIfrDataByte()
 	}
 
 	ifr.IfruHwaddr.Data[0] &^= 0x1 // clear multicast bit
diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go
new file mode 100644
index 0000000..7789ae2
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_arm.go
@@ -0,0 +1,9 @@
+package netlink
+
+import (
+	"math/rand"
+)
+
+func randIfrDataByte() uint8 {
+	return uint8(rand.Intn(255))
+}
diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go
new file mode 100644
index 0000000..23c4a92
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go
@@ -0,0 +1,11 @@
+// +build !arm
+
+package netlink
+
+import (
+	"math/rand"
+)
+
+func randIfrDataByte() int8 {
+	return int8(rand.Intn(255))
+}
diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go
new file mode 100644
index 0000000..086aee7
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_linux_test.go
@@ -0,0 +1,126 @@
+package netlink
+
+import (
+	"net"
+	"strings"
+	"testing"
+)
+
+func ipAssigned(iface *net.Interface, ip net.IP) bool {
+	addrs, _ := iface.Addrs()
+
+	for _, addr := range addrs {
+		args := strings.SplitN(addr.String(), "/", 2)
+		if args[0] == ip.String() {
+			return true
+		}
+	}
+
+	return false
+}
+
+func TestAddDelNetworkIp(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+
+	ifaceName := "lo"
+	ip := net.ParseIP("127.0.1.1")
+	mask := net.IPv4Mask(255, 255, 255, 255)
+	ipNet := &net.IPNet{IP: ip, Mask: mask}
+
+	iface, err := net.InterfaceByName(ifaceName)
+	if err != nil {
+		t.Skip("No 'lo' interface; skipping tests")
+	}
+
+	if err := NetworkLinkAddIp(iface, ip, ipNet); err != nil {
+		t.Fatal(err)
+	}
+
+	if !ipAssigned(iface, ip) {
+		t.Fatalf("Could not locate address '%s' in lo address list.", ip.String())
+	}
+
+	if err := NetworkLinkDelIp(iface, ip, ipNet); err != nil {
+		t.Fatal(err)
+	}
+
+	if ipAssigned(iface, ip) {
+		t.Fatalf("Located address '%s' in lo address list after removal.", ip.String())
+	}
+}
+
+func TestCreateBridgeWithMac(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+
+	name := "testbridge"
+
+	if err := CreateBridge(name, true); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := net.InterfaceByName(name); err != nil {
+		t.Fatal(err)
+	}
+
+	// clean up and verify the bridge is gone
+
+	if err := DeleteBridge(name); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := net.InterfaceByName(name); err == nil {
+		t.Fatalf("expected error getting interface because %s bridge was deleted", name)
+	}
+}
+
+func TestCreateBridgeLink(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+
+	name := "mybrlink"
+
+	if err := NetworkLinkAdd(name, "bridge"); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := net.InterfaceByName(name); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := NetworkLinkDel(name); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := net.InterfaceByName(name); err == nil {
+		t.Fatalf("expected error getting interface because %s bridge was deleted", name)
+	}
+}
+
+func TestCreateVethPair(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+
+	var (
+		name1 = "veth1"
+		name2 = "veth2"
+	)
+
+	if err := NetworkCreateVethPair(name1, name2); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := net.InterfaceByName(name1); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := net.InterfaceByName(name2); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go b/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go
index 1359345..f6e84ad 100644
--- a/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go
+++ b/vendor/src/github.com/docker/libcontainer/netlink/netlink_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux !amd64
+// +build !linux
 
 package netlink
 
@@ -19,6 +19,10 @@
 	return ErrNotImplemented
 }
 
+func NetworkLinkDel(name string) error {
+	return ErrNotImplemented
+}
+
 func NetworkLinkUp(iface *net.Interface) error {
 	return ErrNotImplemented
 }
@@ -27,6 +31,10 @@
 	return ErrNotImplemented
 }
 
+func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
+	return ErrNotImplemented
+}
+
 func AddRoute(destination, source, gateway, device string) error {
 	return ErrNotImplemented
 }
@@ -67,6 +75,10 @@
 	return ErrNotImplemented
 }
 
+func DeleteBridge(name string) error {
+	return ErrNotImplemented
+}
+
 func AddToBridge(iface, master *net.Interface) error {
 	return ErrNotImplemented
 }
diff --git a/vendor/src/github.com/docker/libcontainer/network/loopback.go b/vendor/src/github.com/docker/libcontainer/network/loopback.go
index 46a1fa8..1667b4d 100644
--- a/vendor/src/github.com/docker/libcontainer/network/loopback.go
+++ b/vendor/src/github.com/docker/libcontainer/network/loopback.go
@@ -15,9 +15,7 @@
 }
 
 func (l *Loopback) Initialize(config *Network, networkState *NetworkState) error {
-	if err := SetMtu("lo", config.Mtu); err != nil {
-		return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err)
-	}
+	// Do not set the MTU on the loopback interface - use the default.
 	if err := InterfaceUp("lo"); err != nil {
 		return fmt.Errorf("lo up %s", err)
 	}
diff --git a/vendor/src/github.com/docker/libcontainer/network/netns.go b/vendor/src/github.com/docker/libcontainer/network/netns.go
index 6454447..1ff7506 100644
--- a/vendor/src/github.com/docker/libcontainer/network/netns.go
+++ b/vendor/src/github.com/docker/libcontainer/network/netns.go
@@ -7,7 +7,7 @@
 	"os"
 	"syscall"
 
-	"github.com/dotcloud/docker/pkg/system"
+	"github.com/docker/libcontainer/system"
 )
 
 //  crosbymichael: could make a network strategy that instead of returning veth pair names it returns a pid to an existing network namespace
@@ -23,12 +23,15 @@
 	if networkState.NsPath == "" {
 		return fmt.Errorf("nspath does is not specified in NetworkState")
 	}
+
 	f, err := os.OpenFile(networkState.NsPath, os.O_RDONLY, 0)
 	if err != nil {
 		return fmt.Errorf("failed get network namespace fd: %v", err)
 	}
+
 	if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil {
 		return fmt.Errorf("failed to setns current network namespace: %v", err)
 	}
+
 	return nil
 }
diff --git a/vendor/src/github.com/docker/libcontainer/network/network.go b/vendor/src/github.com/docker/libcontainer/network/network.go
index 48eeec6..c7560c0 100644
--- a/vendor/src/github.com/docker/libcontainer/network/network.go
+++ b/vendor/src/github.com/docker/libcontainer/network/network.go
@@ -44,6 +44,14 @@
 	return netlink.NetworkSetNsPid(iface, nsPid)
 }
 
+func SetInterfaceInNamespaceFd(name string, fd uintptr) error {
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return err
+	}
+	return netlink.NetworkSetNsFd(iface, int(fd))
+}
+
 func SetInterfaceMaster(name, master string) error {
 	iface, err := net.InterfaceByName(name)
 	if err != nil {
diff --git a/vendor/src/github.com/docker/libcontainer/network/stats.go b/vendor/src/github.com/docker/libcontainer/network/stats.go
index b69fa91..c8ece5c 100644
--- a/vendor/src/github.com/docker/libcontainer/network/stats.go
+++ b/vendor/src/github.com/docker/libcontainer/network/stats.go
@@ -9,36 +9,37 @@
 )
 
 type NetworkStats struct {
-	RxBytes   uint64 `json:"rx_bytes,omitempty"`
-	RxPackets uint64 `json:"rx_packets,omitempty"`
-	RxErrors  uint64 `json:"rx_errors,omitempty"`
-	RxDropped uint64 `json:"rx_dropped,omitempty"`
-	TxBytes   uint64 `json:"tx_bytes,omitempty"`
-	TxPackets uint64 `json:"tx_packets,omitempty"`
-	TxErrors  uint64 `json:"tx_errors,omitempty"`
-	TxDropped uint64 `json:"tx_dropped,omitempty"`
+	RxBytes   uint64 `json:"rx_bytes"`
+	RxPackets uint64 `json:"rx_packets"`
+	RxErrors  uint64 `json:"rx_errors"`
+	RxDropped uint64 `json:"rx_dropped"`
+	TxBytes   uint64 `json:"tx_bytes"`
+	TxPackets uint64 `json:"tx_packets"`
+	TxErrors  uint64 `json:"tx_errors"`
+	TxDropped uint64 `json:"tx_dropped"`
 }
 
 // Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
-func GetStats(networkState *NetworkState) (NetworkStats, error) {
+func GetStats(networkState *NetworkState) (*NetworkStats, error) {
 	// This can happen if the network runtime information is missing - possible if the container was created by an old version of libcontainer.
 	if networkState.VethHost == "" {
-		return NetworkStats{}, nil
+		return &NetworkStats{}, nil
 	}
 	data, err := readSysfsNetworkStats(networkState.VethHost)
 	if err != nil {
-		return NetworkStats{}, err
+		return nil, err
 	}
 
-	return NetworkStats{
-		RxBytes:   data["rx_bytes"],
-		RxPackets: data["rx_packets"],
-		RxErrors:  data["rx_errors"],
-		RxDropped: data["rx_dropped"],
-		TxBytes:   data["tx_bytes"],
-		TxPackets: data["tx_packets"],
-		TxErrors:  data["tx_errors"],
-		TxDropped: data["tx_dropped"],
+	// Ingress on the host veth comes from the container, so the host veth's tx_* counters are actually what the container received, and its rx_* counters are what the container sent.
+	return &NetworkStats{
+		RxBytes:   data["tx_bytes"],
+		RxPackets: data["tx_packets"],
+		RxErrors:  data["tx_errors"],
+		RxDropped: data["tx_dropped"],
+		TxBytes:   data["rx_bytes"],
+		TxPackets: data["rx_packets"],
+		TxErrors:  data["rx_errors"],
+		TxDropped: data["rx_dropped"],
 	}, nil
 }
 
diff --git a/vendor/src/github.com/docker/libcontainer/network/types.go b/vendor/src/github.com/docker/libcontainer/network/types.go
index 0f1df30..3b7a4e3 100644
--- a/vendor/src/github.com/docker/libcontainer/network/types.go
+++ b/vendor/src/github.com/docker/libcontainer/network/types.go
@@ -25,6 +25,7 @@
 
 	// Mtu sets the mtu value for the interface and will be mirrored on both the host and
 	// container's interfaces if a pair is created, specifically in the case of type veth
+	// Note: This does not apply to loopback interfaces.
 	Mtu int `json:"mtu,omitempty"`
 }
 
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/config.go b/vendor/src/github.com/docker/libcontainer/nsinit/config.go
new file mode 100644
index 0000000..74c7b3c
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/config.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+
+	"github.com/codegangsta/cli"
+)
+
+var configCommand = cli.Command{
+	Name:   "config",
+	Usage:  "display the container configuration",
+	Action: configAction,
+}
+
+func configAction(context *cli.Context) {
+	container, err := loadConfig()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	data, err := json.MarshalIndent(container, "", "\t")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Printf("%s", data)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/exec.go b/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
index c58c306..c46b191 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
@@ -2,13 +2,18 @@
 
 import (
 	"fmt"
+	"io"
 	"log"
 	"os"
 	"os/exec"
 	"os/signal"
+	"syscall"
+	"text/tabwriter"
 
 	"github.com/codegangsta/cli"
+	"github.com/docker/docker/pkg/term"
 	"github.com/docker/libcontainer"
+	consolepkg "github.com/docker/libcontainer/console"
 	"github.com/docker/libcontainer/namespaces"
 )
 
@@ -16,12 +21,29 @@
 	Name:   "exec",
 	Usage:  "execute a new command inside a container",
 	Action: execAction,
+	Flags: []cli.Flag{
+		cli.BoolFlag{Name: "list", Usage: "list all registered exec functions"},
+		cli.StringFlag{Name: "func", Value: "exec", Usage: "function name to exec inside a container"},
+	},
 }
 
 func execAction(context *cli.Context) {
+	if context.Bool("list") {
+		w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
+		fmt.Fprint(w, "NAME\tUSAGE\n")
+
+		for k, f := range argvs {
+			fmt.Fprintf(w, "%s\t%s\n", k, f.Usage)
+		}
+
+		w.Flush()
+
+		return
+	}
+
 	var exitCode int
 
-	container, err := loadContainer()
+	container, err := loadConfig()
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -32,10 +54,9 @@
 	}
 
 	if state != nil {
-		err = namespaces.ExecIn(container, state, []string(context.Args()))
+		exitCode, err = startInExistingContainer(container, state, context.String("func"), context)
 	} else {
-		term := namespaces.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty)
-		exitCode, err = startContainer(container, term, dataPath, []string(context.Args()))
+		exitCode, err = startContainer(container, dataPath, []string(context.Args()))
 	}
 
 	if err != nil {
@@ -45,11 +66,68 @@
 	os.Exit(exitCode)
 }
 
+// The process for exec'ing a new process inside an existing container is that we have to
+// re-exec ourselves with the nsenter argv[0] so that the C code can setns into the
+// namespaces that we require. That code path then drops us back into Go, where we do the
+// final setup of the namespace and exec the user's application.
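+//
+// The re-exec'd process ends up with an argv along the lines of
+// "nsenter-exec --nspid <pid> --console <console> -- <user command>"
+// (illustrative; it matches the flags parsed in main.go and in nsenter.c).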
+func startInExistingContainer(config *libcontainer.Config, state *libcontainer.State, action string, context *cli.Context) (int, error) {
+	var (
+		master  *os.File
+		console string
+		err     error
+
+		sigc = make(chan os.Signal, 10)
+
+		stdin  = os.Stdin
+		stdout = os.Stdout
+		stderr = os.Stderr
+	)
+	signal.Notify(sigc)
+
+	if config.Tty {
+		stdin = nil
+		stdout = nil
+		stderr = nil
+
+		master, console, err = consolepkg.CreateMasterAndConsole()
+		if err != nil {
+			return -1, err
+		}
+
+		go io.Copy(master, os.Stdin)
+		go io.Copy(os.Stdout, master)
+
+		state, err := term.SetRawTerminal(os.Stdin.Fd())
+		if err != nil {
+			return -1, err
+		}
+
+		defer term.RestoreTerminal(os.Stdin.Fd(), state)
+	}
+
+	startCallback := func(cmd *exec.Cmd) {
+		go func() {
+			resizeTty(master)
+
+			for sig := range sigc {
+				switch sig {
+				case syscall.SIGWINCH:
+					resizeTty(master)
+				default:
+					cmd.Process.Signal(sig)
+				}
+			}
+		}()
+	}
+
+	return namespaces.ExecIn(config, state, context.Args(), os.Args[0], action, stdin, stdout, stderr, console, startCallback)
+}
+
 // startContainer starts the container. Returns the exit status or -1 and an
 // error.
 //
 // Signals sent to the current process will be forwarded to container.
-func startContainer(container *libcontainer.Config, term namespaces.Terminal, dataPath string, args []string) (int, error) {
+func startContainer(container *libcontainer.Config, dataPath string, args []string) (int, error) {
 	var (
 		cmd  *exec.Cmd
 		sigc = make(chan os.Signal, 10)
@@ -65,13 +143,66 @@
 		return cmd
 	}
 
+	var (
+		master  *os.File
+		console string
+		err     error
+
+		stdin  = os.Stdin
+		stdout = os.Stdout
+		stderr = os.Stderr
+	)
+
+	if container.Tty {
+		stdin = nil
+		stdout = nil
+		stderr = nil
+
+		master, console, err = consolepkg.CreateMasterAndConsole()
+		if err != nil {
+			return -1, err
+		}
+
+		go io.Copy(master, os.Stdin)
+		go io.Copy(os.Stdout, master)
+
+		state, err := term.SetRawTerminal(os.Stdin.Fd())
+		if err != nil {
+			return -1, err
+		}
+
+		defer term.RestoreTerminal(os.Stdin.Fd(), state)
+	}
+
 	startCallback := func() {
 		go func() {
+			resizeTty(master)
+
 			for sig := range sigc {
-				cmd.Process.Signal(sig)
+				switch sig {
+				case syscall.SIGWINCH:
+					resizeTty(master)
+				default:
+					cmd.Process.Signal(sig)
+				}
 			}
 		}()
 	}
 
-	return namespaces.Exec(container, term, "", dataPath, args, createCommand, startCallback)
+	return namespaces.Exec(container, stdin, stdout, stderr, console, "", dataPath, args, createCommand, startCallback)
+}
+
+func resizeTty(master *os.File) {
+	if master == nil {
+		return
+	}
+
+	ws, err := term.GetWinsize(os.Stdin.Fd())
+	if err != nil {
+		return
+	}
+
+	if err := term.SetWinsize(master.Fd(), ws); err != nil {
+		return
+	}
 }
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/init.go b/vendor/src/github.com/docker/libcontainer/nsinit/init.go
index eedb961..c091ee1 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/init.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/init.go
@@ -3,10 +3,12 @@
 import (
 	"log"
 	"os"
+	"runtime"
 	"strconv"
 
 	"github.com/codegangsta/cli"
 	"github.com/docker/libcontainer/namespaces"
+	"github.com/docker/libcontainer/syncpipe"
 )
 
 var (
@@ -22,7 +24,9 @@
 )
 
 func initAction(context *cli.Context) {
-	container, err := loadContainer()
+	runtime.LockOSThread()
+
+	container, err := loadConfig()
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -37,7 +41,7 @@
 		log.Fatal(err)
 	}
 
-	syncPipe, err := namespaces.NewSyncPipeFromFd(0, uintptr(pipeFd))
+	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, uintptr(pipeFd))
 	if err != nil {
 		log.Fatalf("unable to create sync pipe: %s", err)
 	}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/main.go b/vendor/src/github.com/docker/libcontainer/nsinit/main.go
index 20132de..d65c014 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/main.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/main.go
@@ -3,35 +3,62 @@
 import (
 	"log"
 	"os"
+	"strings"
 
 	"github.com/codegangsta/cli"
 )
 
-var logPath = os.Getenv("log")
+var (
+	logPath = os.Getenv("log")
+	argvs   = make(map[string]*rFunc)
+)
 
-func preload(context *cli.Context) error {
-	if logPath != "" {
-		if err := openLog(logPath); err != nil {
-			return err
-		}
+func init() {
+	argvs["exec"] = &rFunc{
+		Usage:  "execute a process inside an existing container",
+		Action: nsenterExec,
 	}
 
-	return nil
+	argvs["mknod"] = &rFunc{
+		Usage:  "mknod a device inside an existing container",
+		Action: nsenterMknod,
+	}
+
+	argvs["ip"] = &rFunc{
+		Usage:  "display the container's network interfaces",
+		Action: nsenterIp,
+	}
 }
 
 func main() {
+	// we need to check our argv[0] for any registered functions to run instead of the
+	// normal cli code path
+	f, exists := argvs[strings.TrimPrefix(os.Args[0], "nsenter-")]
+	if exists {
+		runFunc(f)
+
+		return
+	}
+
 	app := cli.NewApp()
+
 	app.Name = "nsinit"
 	app.Version = "0.1"
 	app.Author = "libcontainer maintainers"
+	app.Flags = []cli.Flag{
+		cli.StringFlag{Name: "nspid"},
+		cli.StringFlag{Name: "console"},
+	}
 
 	app.Before = preload
+
 	app.Commands = []cli.Command{
 		execCommand,
 		initCommand,
 		statsCommand,
-		specCommand,
-		nsenterCommand,
+		configCommand,
+		pauseCommand,
+		unpauseCommand,
 	}
 
 	if err := app.Run(os.Args); err != nil {
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/nsenter.go b/vendor/src/github.com/docker/libcontainer/nsinit/nsenter.go
index faa6131..8dc149f 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/nsenter.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/nsenter.go
@@ -1,40 +1,84 @@
 package main
 
 import (
+	"fmt"
 	"log"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"text/tabwriter"
 
-	"github.com/codegangsta/cli"
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/devices"
+	"github.com/docker/libcontainer/mount/nodes"
 	"github.com/docker/libcontainer/namespaces"
+	_ "github.com/docker/libcontainer/namespaces/nsenter"
 )
 
-var nsenterCommand = cli.Command{
-	Name:   "nsenter",
-	Usage:  "init process for entering an existing namespace",
-	Action: nsenterAction,
-	Flags: []cli.Flag{
-		cli.IntFlag{Name: "nspid"},
-		cli.StringFlag{Name: "containerjson"},
-	},
-}
-
-func nsenterAction(context *cli.Context) {
-	args := context.Args()
-
-	if len(args) == 0 {
-		args = []string{"/bin/bash"}
-	}
-
-	container, err := loadContainerFromJson(context.String("containerjson"))
-	if err != nil {
-		log.Fatalf("unable to load container: %s", err)
-	}
-
-	nspid := context.Int("nspid")
-	if nspid <= 0 {
-		log.Fatalf("cannot enter into namespaces without valid pid: %q", nspid)
-	}
-
-	if err := namespaces.NsEnter(container, nspid, args); err != nil {
+// nsenterExec exec's a process inside an existing container
+func nsenterExec(config *libcontainer.Config, args []string) {
+	if err := namespaces.FinalizeSetns(config, args); err != nil {
 		log.Fatalf("failed to nsenter: %s", err)
 	}
 }
+
+// nsenterMknod runs mknod inside an existing container
+//
+// mknod <path> <type> <major> <minor>
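+//
+// e.g. args of ["/dev/null", "c", "1", "3"] recreate the null character
+// device (major 1, minor 3) inside the container.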
+func nsenterMknod(config *libcontainer.Config, args []string) {
+	if len(args) != 4 {
+		log.Fatalf("expected mknod to have 4 arguments not %d", len(args))
+	}
+
+	t := rune(args[1][0])
+
+	major, err := strconv.Atoi(args[2])
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	minor, err := strconv.Atoi(args[3])
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	n := &devices.Device{
+		Path:        args[0],
+		Type:        t,
+		MajorNumber: int64(major),
+		MinorNumber: int64(minor),
+	}
+
+	if err := nodes.CreateDeviceNode("/", n); err != nil {
+		log.Fatal(err)
+	}
+}
+
+// nsenterIp displays the network interfaces inside a container's net namespace
+func nsenterIp(config *libcontainer.Config, args []string) {
+	interfaces, err := net.Interfaces()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
+	fmt.Fprint(w, "NAME\tMTU\tMAC\tFLAG\tADDRS\n")
+
+	for _, iface := range interfaces {
+		addrs, err := iface.Addrs()
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		o := []string{}
+
+		for _, a := range addrs {
+			o = append(o, a.String())
+		}
+
+		fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", iface.Name, iface.MTU, iface.HardwareAddr, iface.Flags, strings.Join(o, ","))
+	}
+
+	w.Flush()
+}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/pause.go b/vendor/src/github.com/docker/libcontainer/nsinit/pause.go
new file mode 100644
index 0000000..ada2425
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/pause.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+	"log"
+
+	"github.com/codegangsta/cli"
+	"github.com/docker/libcontainer/cgroups"
+	"github.com/docker/libcontainer/cgroups/fs"
+	"github.com/docker/libcontainer/cgroups/systemd"
+)
+
+var pauseCommand = cli.Command{
+	Name:   "pause",
+	Usage:  "pause the container's processes",
+	Action: pauseAction,
+}
+
+var unpauseCommand = cli.Command{
+	Name:   "unpause",
+	Usage:  "unpause the container's processes",
+	Action: unpauseAction,
+}
+
+func pauseAction(context *cli.Context) {
+	if err := toggle(cgroups.Frozen); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func unpauseAction(context *cli.Context) {
+	if err := toggle(cgroups.Thawed); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func toggle(state cgroups.FreezerState) error {
+	container, err := loadConfig()
+	if err != nil {
+		return err
+	}
+
+	if systemd.UseSystemd() {
+		err = systemd.Freeze(container.Cgroups, state)
+	} else {
+		err = fs.Freeze(container.Cgroups, state)
+	}
+
+	return err
+}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/spec.go b/vendor/src/github.com/docker/libcontainer/nsinit/spec.go
deleted file mode 100644
index beadc9d..0000000
--- a/vendor/src/github.com/docker/libcontainer/nsinit/spec.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"log"
-
-	"github.com/codegangsta/cli"
-	"github.com/docker/libcontainer"
-)
-
-var specCommand = cli.Command{
-	Name:   "spec",
-	Usage:  "display the container specification",
-	Action: specAction,
-}
-
-func specAction(context *cli.Context) {
-	container, err := loadContainer()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	spec, err := getContainerSpec(container)
-	if err != nil {
-		log.Fatalf("Failed to get spec - %v\n", err)
-	}
-
-	fmt.Printf("Spec:\n%v\n", spec)
-}
-
-// returns the container spec in json format.
-func getContainerSpec(container *libcontainer.Config) (string, error) {
-	spec, err := json.MarshalIndent(container, "", "\t")
-	if err != nil {
-		return "", err
-	}
-
-	return string(spec), nil
-}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/stats.go b/vendor/src/github.com/docker/libcontainer/nsinit/stats.go
index eae9833..612b4a4 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/stats.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/stats.go
@@ -16,35 +16,24 @@
 }
 
 func statsAction(context *cli.Context) {
-	container, err := loadContainer()
+	container, err := loadConfig()
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	runtimeCkpt, err := libcontainer.GetState(dataPath)
+	state, err := libcontainer.GetState(dataPath)
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	stats, err := getStats(container, runtimeCkpt)
-	if err != nil {
-		log.Fatalf("Failed to get stats - %v\n", err)
-	}
-
-	fmt.Printf("Stats:\n%v\n", stats)
-}
-
-// returns the container stats in json format.
-func getStats(container *libcontainer.Config, state *libcontainer.State) (string, error) {
 	stats, err := libcontainer.GetStats(container, state)
 	if err != nil {
-		return "", err
+		log.Fatal(err)
 	}
-
-	out, err := json.MarshalIndent(stats, "", "\t")
+	data, err := json.MarshalIndent(stats, "", "\t")
 	if err != nil {
-		return "", err
+		log.Fatal(err)
 	}
 
-	return string(out), nil
+	fmt.Printf("%s", data)
 }
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
index 44194d8..7f51559 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
@@ -6,10 +6,18 @@
 	"os"
 	"path/filepath"
 
+	"github.com/codegangsta/cli"
 	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/syncpipe"
 )
 
-func loadContainer() (*libcontainer.Config, error) {
+// rFunc is a function registered to run after an exec-in into an existing container
+type rFunc struct {
+	Usage  string
+	Action func(*libcontainer.Config, []string)
+}
+
+func loadConfig() (*libcontainer.Config, error) {
 	f, err := os.Open(filepath.Join(dataPath, "container.json"))
 	if err != nil {
 		return nil, err
@@ -35,12 +43,52 @@
 	return nil
 }
 
-func loadContainerFromJson(rawData string) (*libcontainer.Config, error) {
-	var container *libcontainer.Config
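+// findUserArgs returns everything after the first "--" in os.Args; e.g. for an
+// argv of "nsenter-exec --nspid 1234 -- ps aux" it returns ["ps", "aux"].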
+func findUserArgs() []string {
+	i := 0
+	for _, a := range os.Args {
+		i++
 
-	if err := json.Unmarshal([]byte(rawData), &container); err != nil {
+		if a == "--" {
+			break
+		}
+	}
+
+	return os.Args[i:]
+}
+
+// loadConfigFromFd loads a container's config from the sync pipe that is provided by
+// fd 3 when running a process
+func loadConfigFromFd() (*libcontainer.Config, error) {
+	syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3)
+	if err != nil {
 		return nil, err
 	}
 
-	return container, nil
+	var config *libcontainer.Config
+	if err := syncPipe.ReadFromParent(&config); err != nil {
+		return nil, err
+	}
+
+	return config, nil
+}
+
+func preload(context *cli.Context) error {
+	if logPath != "" {
+		if err := openLog(logPath); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func runFunc(f *rFunc) {
+	userArgs := findUserArgs()
+
+	config, err := loadConfigFromFd()
+	if err != nil {
+		log.Fatalf("unable to receive config from sync pipe: %s", err)
+	}
+
+	f.Action(config, userArgs)
 }
diff --git a/vendor/src/github.com/docker/libcontainer/process.go b/vendor/src/github.com/docker/libcontainer/process.go
new file mode 100644
index 0000000..489666a
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/process.go
@@ -0,0 +1,27 @@
+package libcontainer
+
+import "io"
+
+// Configuration for a process to be run inside a container.
+type ProcessConfig struct {
+	// The command to be run followed by any arguments.
+	Args []string
+
+	// Map of environment variables to their values.
+	Env []string
+
+	// Stdin is a pointer to a reader which provides the standard input stream.
+	// Stdout is a pointer to a writer which receives the standard output stream.
+	// Stderr is a pointer to a writer which receives the standard error stream.
+	//
+	// If a reader or writer is nil, the input stream is assumed to be empty and the output is
+	// discarded.
+	//
+	// The readers and writers, if supplied, are closed when the process terminates. Their Close
+	// methods should be idempotent.
+	//
+	// Stdout and Stderr may refer to the same writer in which case the output is interspersed.
+	Stdin  io.ReadCloser
+	Stdout io.WriteCloser
+	Stderr io.WriteCloser
+}
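+
+// A minimal, hypothetical example of filling in a ProcessConfig; with Stdin
+// left nil the input stream is treated as empty:
+//
+//	pc := &ProcessConfig{
+//		Args:   []string{"ps", "aux"},
+//		Env:    []string{"PATH=/usr/sbin:/usr/bin:/sbin:/bin"},
+//		Stdout: os.Stdout,
+//		Stderr: os.Stderr,
+//	}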
diff --git a/vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go b/vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go
index 21e4de2..7aef5fa 100644
--- a/vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go
+++ b/vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go
@@ -27,7 +27,7 @@
 	return nil
 }
 
-// DropCapabilities drops all capabilities for the current process expect those specified in the container configuration.
+// DropCapabilities drops all capabilities for the current process except those specified in the container configuration.
 func DropCapabilities(capList []string) error {
 	c, err := capability.NewPid(os.Getpid())
 	if err != nil {
diff --git a/vendor/src/github.com/docker/libcontainer/security/capabilities/types.go b/vendor/src/github.com/docker/libcontainer/security/capabilities/types.go
index feb38e3..a960b80 100644
--- a/vendor/src/github.com/docker/libcontainer/security/capabilities/types.go
+++ b/vendor/src/github.com/docker/libcontainer/security/capabilities/types.go
@@ -64,8 +64,6 @@
 	{Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
 	{Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
 	{Key: "SYSLOG", Value: capability.CAP_SYSLOG},
-	{Key: "SETUID", Value: capability.CAP_SETUID},
-	{Key: "SETGID", Value: capability.CAP_SETGID},
 	{Key: "CHOWN", Value: capability.CAP_CHOWN},
 	{Key: "NET_RAW", Value: capability.CAP_NET_RAW},
 	{Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE},
diff --git a/vendor/src/github.com/docker/libcontainer/security/restrict/restrict.go b/vendor/src/github.com/docker/libcontainer/security/restrict/restrict.go
index ff7ae2f..dd765b1 100644
--- a/vendor/src/github.com/docker/libcontainer/security/restrict/restrict.go
+++ b/vendor/src/github.com/docker/libcontainer/security/restrict/restrict.go
@@ -7,23 +7,21 @@
 	"os"
 	"syscall"
 	"time"
-
-	"github.com/dotcloud/docker/pkg/system"
 )
 
 const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
 
 func mountReadonly(path string) error {
 	for i := 0; i < 5; i++ {
-		if err := system.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) {
+		if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) {
 			switch err {
 			case syscall.EINVAL:
 				// Probably not a mountpoint, use bind-mount
-				if err := system.Mount(path, path, "", syscall.MS_BIND, ""); err != nil {
+				if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil {
 					return err
 				}
 
-				return system.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "")
+				return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "")
 			case syscall.EBUSY:
 				time.Sleep(100 * time.Millisecond)
 				continue
@@ -47,7 +45,7 @@
 		}
 	}
 
-	if err := system.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) {
+	if err := syscall.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) {
 		return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore: %s", err)
 	}
 
diff --git a/vendor/src/github.com/docker/libcontainer/selinux/selinux.go b/vendor/src/github.com/docker/libcontainer/selinux/selinux.go
index 6cf7bd7..bfa7957 100644
--- a/vendor/src/github.com/docker/libcontainer/selinux/selinux.go
+++ b/vendor/src/github.com/docker/libcontainer/selinux/selinux.go
@@ -1,3 +1,5 @@
+// +build linux
+
 package selinux
 
 import (
@@ -5,14 +7,16 @@
 	"crypto/rand"
 	"encoding/binary"
 	"fmt"
-	"github.com/dotcloud/docker/pkg/mount"
-	"github.com/dotcloud/docker/pkg/system"
 	"io"
 	"os"
+	"path/filepath"
 	"regexp"
 	"strconv"
 	"strings"
 	"syscall"
+
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/libcontainer/system"
 )
 
 const (
@@ -76,7 +80,7 @@
 	}
 	selinuxEnabledChecked = true
 	if fs := getSelinuxMountPoint(); fs != "" {
-		if con, _ := getcon(); con != "kernel" {
+		if con, _ := Getcon(); con != "kernel" {
 			selinuxEnabled = true
 		}
 	}
@@ -145,16 +149,23 @@
 	return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
 }
 
+// Return the SELinux label for this path
+func Getfilecon(path string) (string, error) {
+	con, err := system.Lgetxattr(path, xattrNameSelinux)
+	return string(con), err
+}
+
 func Setfscreatecon(scon string) error {
-	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", system.Gettid()), scon)
+	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon)
 }
 
 func Getfscreatecon() (string, error) {
-	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", system.Gettid()))
+	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
 }
 
-func getcon() (string, error) {
-	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", system.Gettid()))
+// Return the SELinux label of the current process thread.
+func Getcon() (string, error) {
+	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
 }
 
 func Getpidcon(pid int) (string, error) {
@@ -184,7 +195,7 @@
 }
 
 func Setexeccon(scon string) error {
-	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", system.Gettid()), scon)
+	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon)
 }
 
 func (c SELinuxContext) Get() string {
@@ -396,3 +407,36 @@
 	tcon["level"] = scon["level"]
 	return tcon.Get(), nil
 }
+
+// Prevent users from relabeling system files
+func badPrefix(fpath string) error {
+	var badprefixes = []string{"/usr"}
+
+	for _, prefix := range badprefixes {
+		if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
+			return fmt.Errorf("Relabeling content in %s is not allowed.", prefix)
+		}
+	}
+	return nil
+}
+
+// Chcon changes the fpath file object to the SELinux label scon.
+// If fpath is a directory and recurse is true, Chcon walks the
+// directory tree, setting the label on every entry.
+func Chcon(fpath string, scon string, recurse bool) error {
+	if !SelinuxEnabled() {
+		return nil
+	}
+	if err := badPrefix(fpath); err != nil {
+		return err
+	}
+	callback := func(p string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		return Setfilecon(p, scon)
+	}
+
+	if recurse {
+		return filepath.Walk(fpath, callback)
+	}
+
+	return Setfilecon(fpath, scon)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go b/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go
index 40ed70d..34c3497 100644
--- a/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go
+++ b/vendor/src/github.com/docker/libcontainer/selinux/selinux_test.go
@@ -1,9 +1,12 @@
+// +build linux
+
 package selinux_test
 
 import (
-	"github.com/docker/libcontainer/selinux"
 	"os"
 	"testing"
+
+	"github.com/docker/libcontainer/selinux"
 )
 
 func testSetfilecon(t *testing.T) {
diff --git a/vendor/src/github.com/docker/libcontainer/state.go b/vendor/src/github.com/docker/libcontainer/state.go
index a055bb0..208b4c6 100644
--- a/vendor/src/github.com/docker/libcontainer/state.go
+++ b/vendor/src/github.com/docker/libcontainer/state.go
@@ -12,14 +12,36 @@
 type State struct {
 	// InitPid is the init process id in the parent namespace
 	InitPid int `json:"init_pid,omitempty"`
+
 	// InitStartTime is the init process start time
 	InitStartTime string `json:"init_start_time,omitempty"`
+
 	// Network runtime state.
 	NetworkState network.NetworkState `json:"network_state,omitempty"`
+
+	// Path to all the cgroups setup for a container. Key is cgroup subsystem name.
+	CgroupPaths map[string]string `json:"cgroup_paths,omitempty"`
 }
 
-// The name of the runtime state file
-const stateFile = "state.json"
+// The running state of the container.
+type RunState int
+
+const (
+	// The name of the runtime state file
+	stateFile = "state.json"
+
+	// The container exists and is running.
+	Running RunState = iota
+
+	// The container exists, it is in the process of being paused.
+	Pausing
+
+	// The container exists, but all its processes are paused.
+	Paused
+
+	// The container does not exist.
+	Destroyed
+)
 
 // SaveState writes the container's runtime state to a state.json file
 // in the specified path
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go
similarity index 75%
rename from vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe.go
rename to vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go
index dcb5d97..d2870f5 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe.go
+++ b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe.go
@@ -1,4 +1,4 @@
-package namespaces
+package syncpipe
 
 import (
 	"encoding/json"
@@ -6,8 +6,6 @@
 	"io/ioutil"
 	"os"
 	"syscall"
-
-	"github.com/docker/libcontainer/network"
 )
 
 // SyncPipe allows communication to and from the child processes
@@ -39,8 +37,8 @@
 	return s.parent
 }
 
-func (s *SyncPipe) SendToChild(networkState *network.NetworkState) error {
-	data, err := json.Marshal(networkState)
+func (s *SyncPipe) SendToChild(v interface{}) error {
+	data, err := json.Marshal(v)
 	if err != nil {
 		return err
 	}
@@ -63,18 +61,19 @@
 	return nil
 }
 
-func (s *SyncPipe) ReadFromParent() (*network.NetworkState, error) {
+func (s *SyncPipe) ReadFromParent(v interface{}) error {
 	data, err := ioutil.ReadAll(s.child)
 	if err != nil {
-		return nil, fmt.Errorf("error reading from sync pipe %s", err)
+		return fmt.Errorf("error reading from sync pipe %s", err)
 	}
-	var networkState *network.NetworkState
+
 	if len(data) > 0 {
-		if err := json.Unmarshal(data, &networkState); err != nil {
-			return nil, err
+		if err := json.Unmarshal(data, v); err != nil {
+			return err
 		}
 	}
-	return networkState, nil
+
+	return nil
 }
 
 func (s *SyncPipe) ReportChildError(err error) {
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_linux.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go
similarity index 94%
rename from vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_linux.go
rename to vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go
index ad61e75..bea4b52 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_linux.go
@@ -1,4 +1,4 @@
-package namespaces
+package syncpipe
 
 import (
 	"os"
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_test.go b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go
similarity index 72%
rename from vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_test.go
rename to vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go
index 69bd0ab..6833277 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_test.go
+++ b/vendor/src/github.com/docker/libcontainer/syncpipe/sync_pipe_test.go
@@ -1,12 +1,14 @@
-package namespaces
+package syncpipe
 
 import (
 	"fmt"
 	"testing"
-
-	"github.com/docker/libcontainer/network"
 )
 
+type testStruct struct {
+	Name string
+}
+
 func TestSendErrorFromChild(t *testing.T) {
 	pipe, err := NewSyncPipe()
 	if err != nil {
@@ -46,16 +48,16 @@
 
 	expected := "libcontainer"
 
-	if err := pipe.SendToChild(&network.NetworkState{VethHost: expected}); err != nil {
+	if err := pipe.SendToChild(testStruct{Name: expected}); err != nil {
 		t.Fatal(err)
 	}
 
-	payload, err := pipe.ReadFromParent()
-	if err != nil {
+	var s *testStruct
+	if err := pipe.ReadFromParent(&s); err != nil {
 		t.Fatal(err)
 	}
 
-	if payload.VethHost != expected {
-		t.Fatalf("expected veth host %q but received %q", expected, payload.VethHost)
+	if s.Name != expected {
+		t.Fatalf("expected name %q but received %q", expected, s.Name)
 	}
 }
diff --git a/vendor/src/github.com/docker/libcontainer/system/linux.go b/vendor/src/github.com/docker/libcontainer/system/linux.go
new file mode 100644
index 0000000..c07ef15
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/system/linux.go
@@ -0,0 +1,60 @@
+// +build linux
+
+package system
+
+import (
+	"os/exec"
+	"syscall"
+	"unsafe"
+)
+
+func Execv(cmd string, args []string, env []string) error {
+	name, err := exec.LookPath(cmd)
+	if err != nil {
+		return err
+	}
+
+	return syscall.Exec(name, args, env)
+}
+
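+// ParentDeathSignal sets the signal (via PR_SET_PDEATHSIG) that the kernel
+// delivers to this process when its parent dies.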
+func ParentDeathSignal(sig uintptr) error {
+	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func GetParentDeathSignal() (int, error) {
+	var sig int
+
+	_, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0)
+
+	if err != 0 {
+		return -1, err
+	}
+
+	return sig, nil
+}
+
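+// SetKeepCaps tells the kernel to retain the process's permitted capabilities
+// across a change of UID (PR_SET_KEEPCAPS).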
+func SetKeepCaps() error {
+	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 {
+		return err
+	}
+
+	return nil
+}
+
+func ClearKeepCaps() error {
+	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 {
+		return err
+	}
+
+	return nil
+}
+
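+// Setctty makes the file on fd 0 (stdin) the controlling terminal of the
+// calling process (TIOCSCTTY).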
+func Setctty() error {
+	if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/system/proc.go b/vendor/src/github.com/docker/libcontainer/system/proc.go
similarity index 99%
rename from pkg/system/proc.go
rename to vendor/src/github.com/docker/libcontainer/system/proc.go
index a492346..37808a2 100644
--- a/pkg/system/proc.go
+++ b/vendor/src/github.com/docker/libcontainer/system/proc.go
@@ -14,6 +14,7 @@
 	if err != nil {
 		return "", err
 	}
+
 	parts := strings.Split(string(data), " ")
 	// the starttime is located at pos 22
 	// from the man page
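
Note: the hunk above only adds a blank line, but the surrounding function parses /proc/<pid>/stat, whose space-separated fields place the process start time (in clock ticks since boot) at position 22 per proc(5). A standalone sketch of the same parsing idea (mirroring the naive space split the vendored code uses):

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	data, err := ioutil.ReadFile("/proc/self/stat")
	if err != nil {
		panic(err)
	}
	parts := strings.Split(string(data), " ")
	// proc(5): field 22 (1-indexed) is starttime, in clock ticks since boot.
	fmt.Println("starttime (ticks):", parts[22-1])
}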
diff --git a/pkg/system/setns_linux.go b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go
similarity index 83%
rename from pkg/system/setns_linux.go
rename to vendor/src/github.com/docker/libcontainer/system/setns_linux.go
index 2b6f9e7..32821ee 100644
--- a/pkg/system/setns_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go
@@ -11,17 +11,21 @@
 // We need different setns values for the different platforms and architectures
 // We are declaring the map here because the SETNS syscall does not exist in the stdlib
 var setNsMap = map[string]uintptr{
+	"linux/386":   346,
 	"linux/amd64": 308,
+	"linux/arm":   374,
 }
 
 func Setns(fd uintptr, flags uintptr) error {
 	ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
 	if !exists {
-		return ErrNotSupportedPlatform
+		return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
 	}
+
 	_, _, err := syscall.RawSyscall(ns, fd, flags, 0)
 	if err != 0 {
 		return err
 	}
+
 	return nil
 }
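
Note: Setns dispatches a raw syscall number keyed on GOOS/GOARCH because the Go syscall package of this era does not export SYS_SETNS. Joining an existing namespace is then a matter of opening the /proc ns file and passing its fd. A minimal sketch (the target pid and namespace type are placeholders):

package main

import (
	"os"
	"runtime"

	"github.com/docker/libcontainer/system"
)

func main() {
	// setns affects only the calling thread, so pin the goroutine first.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Hypothetical target: the network namespace of pid 1234.
	f, err := os.Open("/proc/1234/ns/net")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Passing 0 for flags lets the kernel verify the fd's namespace type.
	if err := system.Setns(f.Fd(), 0); err != nil {
		panic(err)
	}
}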
diff --git a/vendor/src/github.com/docker/libcontainer/system/sysconfig.go b/vendor/src/github.com/docker/libcontainer/system/sysconfig.go
new file mode 100644
index 0000000..5efddef
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/system/sysconfig.go
@@ -0,0 +1,12 @@
+// +build cgo
+
+package system
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+func GetClockTicks() int {
+	return int(C.sysconf(C._SC_CLK_TCK))
+}
diff --git a/vendor/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go b/vendor/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go
new file mode 100644
index 0000000..663db82
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/system/sysconfig_notcgo.go
@@ -0,0 +1,8 @@
+// +build !cgo
+
+package system
+
+func GetClockTicks() int {
+	// TODO figure out a better alternative for platforms where we're missing cgo
+	return 100
+}
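
Note: the two build-tagged files give GetClockTicks a cgo-backed sysconf(_SC_CLK_TCK) implementation and a hard-coded fallback of 100 for cgo-less builds (100 Hz is the common Linux USER_HZ, hence a TODO rather than a hard failure). Tick counts read from /proc convert to seconds by dividing through, e.g. (the starttime value is purely illustrative):

package main

import (
	"fmt"

	"github.com/docker/libcontainer/system"
)

func main() {
	ticks := system.GetClockTicks()
	// e.g. convert a starttime of 250000 ticks into seconds since boot.
	start := 250000
	fmt.Printf("%d ticks / %d Hz = %d s since boot\n", start, ticks, start/ticks)
}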
diff --git a/vendor/src/github.com/docker/libcontainer/system/xattrs_linux.go b/vendor/src/github.com/docker/libcontainer/system/xattrs_linux.go
new file mode 100644
index 0000000..00edb20
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/system/xattrs_linux.go
@@ -0,0 +1,59 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Returns a nil slice and nil error if the xattr is not set
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return nil, err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return nil, err
+	}
+
+	dest := make([]byte, 128)
+	destBytes := unsafe.Pointer(&dest[0])
+	sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	if errno == syscall.ENODATA {
+		return nil, nil
+	}
+	if errno == syscall.ERANGE {
+		dest = make([]byte, sz)
+		destBytes := unsafe.Pointer(&dest[0])
+		sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	}
+	if errno != 0 {
+		return nil, errno
+	}
+
+	return dest[:sz], nil
+}
+
+var _zero uintptr
+
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return err
+	}
+	var dataBytes unsafe.Pointer
+	if len(data) > 0 {
+		dataBytes = unsafe.Pointer(&data[0])
+	} else {
+		dataBytes = unsafe.Pointer(&_zero)
+	}
+	_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
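
Note: Lgetxattr implements the standard two-pass pattern — try a 128-byte buffer, and on ERANGE retry with the size the kernel reported — while ENODATA maps to (nil, nil) so an unset attribute reads as absence rather than an error. A minimal round trip (the path and attribute name are arbitrary examples; user.* xattrs require a supporting filesystem):

package main

import (
	"fmt"
	"os"

	"github.com/docker/libcontainer/system"
)

func main() {
	// Hypothetical demo file.
	path := "/tmp/xattr-demo"
	f, err := os.Create(path)
	if err != nil {
		panic(err)
	}
	f.Close()

	if err := system.Lsetxattr(path, "user.demo", []byte("hello"), 0); err != nil {
		panic(err)
	}

	val, err := system.Lgetxattr(path, "user.demo")
	if err != nil {
		panic(err)
	}
	fmt.Printf("user.demo = %q\n", val) // unset attrs come back as (nil, nil)
}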
diff --git a/vendor/src/github.com/docker/libcontainer/types.go b/vendor/src/github.com/docker/libcontainer/types.go
index 5095dca..c341137 100644
--- a/vendor/src/github.com/docker/libcontainer/types.go
+++ b/vendor/src/github.com/docker/libcontainer/types.go
@@ -6,6 +6,6 @@
 )
 
 type ContainerStats struct {
-	NetworkStats network.NetworkStats `json:"network_stats, omitempty"`
-	CgroupStats  *cgroups.Stats       `json:"cgroup_stats, omitempty"`
+	NetworkStats *network.NetworkStats `json:"network_stats,omitempty"`
+	CgroupStats  *cgroups.Stats        `json:"cgroup_stats,omitempty"`
 }
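
Note: the tag change above is not cosmetic. encoding/json does not trim spaces inside struct tags, so `json:"network_stats, omitempty"` names the field "network_stats" but parses the option as " omitempty" (leading space), which never matches "omitempty" — the option was silently ignored. Switching NetworkStats to a pointer matters for the same reason: omitempty never omits a non-pointer struct value, but it does omit a nil pointer. A self-contained demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

type withSpace struct {
	A int `json:"a, omitempty"` // option is " omitempty": never matched
}

type withoutSpace struct {
	A int `json:"a,omitempty"`
}

func main() {
	b1, _ := json.Marshal(withSpace{})
	b2, _ := json.Marshal(withoutSpace{})
	fmt.Println(string(b1)) // {"a":0} — omitempty silently ignored
	fmt.Println(string(b2)) // {}
}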
diff --git a/vendor/src/github.com/docker/libcontainer/update-vendor.sh b/vendor/src/github.com/docker/libcontainer/update-vendor.sh
new file mode 100755
index 0000000..df66a0a
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/update-vendor.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$BASH_SOURCE")"
+
+# Downloads dependencies into vendor/ directory
+mkdir -p vendor
+cd vendor
+
+clone() {
+	vcs=$1
+	pkg=$2
+	rev=$3
+	
+	pkg_url=https://$pkg
+	target_dir=src/$pkg
+	
+	echo -n "$pkg @ $rev: "
+	
+	if [ -d $target_dir ]; then
+		echo -n 'rm old, '
+		rm -fr $target_dir
+	fi
+	
+	echo -n 'clone, '
+	case $vcs in
+		git)
+			git clone --quiet --no-checkout $pkg_url $target_dir
+			( cd $target_dir && git reset --quiet --hard $rev )
+			;;
+		hg)
+			hg clone --quiet --updaterev $rev $pkg_url $target_dir
+			;;
+	esac
+	
+	echo -n 'rm VCS, '
+	( cd $target_dir && rm -rf .{git,hg} )
+	
+	echo done
+}
+
+# the following lines are in sorted order, FYI
+clone git github.com/codegangsta/cli 1.1.0
+clone git github.com/coreos/go-systemd v2
+clone git github.com/godbus/dbus v1
+clone git github.com/syndtr/gocapability 3c85049eae
+
+# intentionally not vendoring Docker itself...  that'd be a circle :)
diff --git a/pkg/user/MAINTAINERS b/vendor/src/github.com/docker/libcontainer/user/MAINTAINERS
similarity index 100%
rename from pkg/user/MAINTAINERS
rename to vendor/src/github.com/docker/libcontainer/user/MAINTAINERS
diff --git a/pkg/user/user.go b/vendor/src/github.com/docker/libcontainer/user/user.go
similarity index 87%
rename from pkg/user/user.go
rename to vendor/src/github.com/docker/libcontainer/user/user.go
index df47101..493dd86 100644
--- a/pkg/user/user.go
+++ b/vendor/src/github.com/docker/libcontainer/user/user.go
@@ -165,12 +165,13 @@
 	return out, nil
 }
 
-// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, and list of supplementary group IDs, if possible.
-func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) (int, int, []int, error) {
+// Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, list of supplementary group IDs, and home directory, if available and/or applicable.
+func GetUserGroupSupplementaryHome(userSpec string, defaultUid, defaultGid int, defaultHome string) (int, int, []int, string, error) {
 	var (
 		uid      = defaultUid
 		gid      = defaultGid
 		suppGids = []int{}
+		home     = defaultHome
 
 		userArg, groupArg string
 	)
@@ -188,7 +189,7 @@
 		if userArg == "" {
 			userArg = strconv.Itoa(uid)
 		}
-		return 0, 0, nil, fmt.Errorf("Unable to find user %v: %v", userArg, err)
+		return 0, 0, nil, "", fmt.Errorf("Unable to find user %v: %v", userArg, err)
 	}
 
 	haveUser := users != nil && len(users) > 0
@@ -196,15 +197,16 @@
 		// if we found any user entries that matched our filter, let's take the first one as "correct"
 		uid = users[0].Uid
 		gid = users[0].Gid
+		home = users[0].Home
 	} else if userArg != "" {
 		// we asked for a user but didn't find them...  let's check to see if we wanted a numeric user
 		uid, err = strconv.Atoi(userArg)
 		if err != nil {
 			// not numeric - we have to bail
-			return 0, 0, nil, fmt.Errorf("Unable to find user %v", userArg)
+			return 0, 0, nil, "", fmt.Errorf("Unable to find user %v", userArg)
 		}
 		if uid < minId || uid > maxId {
-			return 0, 0, nil, ErrRange
+			return 0, 0, nil, "", ErrRange
 		}
 
 		// if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit
@@ -223,7 +225,7 @@
 			return false
 		})
 		if err != nil && !os.IsNotExist(err) {
-			return 0, 0, nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err)
+			return 0, 0, nil, "", fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err)
 		}
 
 		haveGroup := groups != nil && len(groups) > 0
@@ -236,10 +238,10 @@
 				gid, err = strconv.Atoi(groupArg)
 				if err != nil {
 					// not numeric - we have to bail
-					return 0, 0, nil, fmt.Errorf("Unable to find group %v", groupArg)
+					return 0, 0, nil, "", fmt.Errorf("Unable to find group %v", groupArg)
 				}
 				if gid < minId || gid > maxId {
-					return 0, 0, nil, ErrRange
+					return 0, 0, nil, "", ErrRange
 				}
 
 				// if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit
@@ -252,5 +254,5 @@
 		}
 	}
 
-	return uid, gid, suppGids, nil
+	return uid, gid, suppGids, home, nil
 }
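
Note: the signature change threads the resolved user's home directory through alongside uid, gid, and supplementary groups, falling back to the caller's default when the spec is numeric or unresolvable. A minimal sketch of calling the renamed function (the spec and default values are arbitrary examples):

package main

import (
	"fmt"

	"github.com/docker/libcontainer/user"
)

func main() {
	// Resolve a "user:group"-style spec; fall back to root's ids and "/".
	uid, gid, suppGids, home, err := user.GetUserGroupSupplementaryHome("1000:1000", 0, 0, "/")
	if err != nil {
		panic(err)
	}
	fmt.Println(uid, gid, suppGids, home)
}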
diff --git a/pkg/user/user_test.go b/vendor/src/github.com/docker/libcontainer/user/user_test.go
similarity index 100%
rename from pkg/user/user_test.go
rename to vendor/src/github.com/docker/libcontainer/user/user_test.go