Merge pull request #1525 from griff/1503-fix
Don't read from stdout when only attached to stdin
diff --git a/Dockerfile b/Dockerfile
index a7a7724..0801907 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,32 +3,37 @@
from ubuntu:12.04
maintainer Solomon Hykes <solomon@dotcloud.com>
# Build dependencies
+run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
+run apt-get update
run apt-get install -y -q curl
run apt-get install -y -q git
+run apt-get install -y -q mercurial
# Install Go
-run curl -s https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz | tar -v -C /usr/local -xz
+run curl -s https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | tar -v -C /usr/local -xz
env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
env GOPATH /go
env CGO_ENABLED 0
run cd /tmp && echo 'package main' > t.go && go test -a -i -v
+# Ubuntu stuff
+run apt-get install -y -q ruby1.9.3 rubygems
+run gem install fpm
+run apt-get install -y -q reprepro dpkg-sig
+# Install s3cmd 1.0.1 (earlier versions don't support env variables in the config)
+run apt-get install -y -q python-pip
+run pip install s3cmd
+run pip install python-magic
+run /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
# Download dependencies
run PKG=github.com/kr/pty REV=27435c699; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
run PKG=github.com/gorilla/context/ REV=708054d61e5; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
run PKG=github.com/gorilla/mux/ REV=9b36453141c; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
run PKG=github.com/dotcloud/tar/ REV=d06045a6d9; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
-# Run dependencies
-run apt-get install -y iptables
-# lxc requires updating ubuntu sources
-run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
-run apt-get update
-run apt-get install -y lxc
-run apt-get install -y aufs-tools
# Docker requires code.google.com/p/go.net/websocket
run apt-get install -y -q mercurial
-run PKG=code.google.com/p/go.net REV=78ad7f42aa2e; hg clone https://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout -r $REV
+run PKG=code.google.com/p/go.net/ REV=84a4013f96e0; hg clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout $REV
# Upload docker source
add . /go/src/github.com/dotcloud/docker
+run ln -s /go/src/github.com/dotcloud/docker /src
# Build the binary
-run cd /go/src/github.com/dotcloud/docker/docker && go install -ldflags "-X main.GITCOMMIT '??' -d -w"
-env PATH /usr/local/go/bin:/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
-cmd ["docker"]
+run cd /go/src/github.com/dotcloud/docker && hack/release/make.sh
+cmd cd /go/src/github.com/dotcloud/docker && hack/release/release.sh
diff --git a/Makefile b/Makefile
deleted file mode 100644
index dd365dc..0000000
--- a/Makefile
+++ /dev/null
@@ -1,95 +0,0 @@
-DOCKER_PACKAGE := github.com/dotcloud/docker
-RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
-SRCRELEASE := docker-$(RELEASE_VERSION)
-BINRELEASE := docker-$(RELEASE_VERSION).tgz
-BUILD_SRC := build_src
-BUILD_PATH := ${BUILD_SRC}/src/${DOCKER_PACKAGE}
-
-GIT_ROOT := $(shell git rev-parse --show-toplevel)
-BUILD_DIR := $(CURDIR)/.gopath
-
-GOPATH ?= $(BUILD_DIR)
-export GOPATH
-
-GO_OPTIONS ?= -a -ldflags='-w -d'
-ifeq ($(VERBOSE), 1)
-GO_OPTIONS += -v
-endif
-
-GIT_COMMIT = $(shell git rev-parse --short HEAD)
-GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
-
-BUILD_OPTIONS = -a -ldflags "-X main.GITCOMMIT $(GIT_COMMIT)$(GIT_STATUS) -d -w"
-
-SRC_DIR := $(GOPATH)/src
-
-DOCKER_DIR := $(SRC_DIR)/$(DOCKER_PACKAGE)
-DOCKER_MAIN := $(DOCKER_DIR)/docker
-
-DOCKER_BIN_RELATIVE := bin/docker
-DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)
-
-.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)
-
-all: $(DOCKER_BIN)
-
-$(DOCKER_BIN): $(DOCKER_DIR)
- @mkdir -p $(dir $@)
- @(cd $(DOCKER_MAIN); CGO_ENABLED=0 go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
- @echo $(DOCKER_BIN_RELATIVE) is created.
-
-$(DOCKER_DIR):
- @mkdir -p $(dir $@)
- @if [ -h $@ ]; then rm -f $@; fi; ln -sf $(CURDIR)/ $@
- @(cd $(DOCKER_MAIN); go get -d $(GO_OPTIONS))
-
-whichrelease:
- echo $(RELEASE_VERSION)
-
-release: $(BINRELEASE)
- s3cmd -P put $(BINRELEASE) s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-$(RELEASE_VERSION).tgz
- s3cmd -P put docker-latest.tgz s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-latest.tgz
- s3cmd -P put $(SRCRELEASE)/bin/docker s3://get.docker.io/builds/`uname -s`/`uname -m`/docker
- echo $(RELEASE_VERSION) > latest ; s3cmd -P put latest s3://get.docker.io/latest ; rm latest
-
-srcrelease: $(SRCRELEASE)
-deps: $(DOCKER_DIR)
-
-# A clean checkout of $RELEASE_VERSION, with vendored dependencies
-$(SRCRELEASE):
- rm -fr $(SRCRELEASE)
- git clone $(GIT_ROOT) $(SRCRELEASE)
- cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)
-
-# A binary release ready to be uploaded to a mirror
-$(BINRELEASE): $(SRCRELEASE)
- rm -f $(BINRELEASE)
- cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
- cd $(SRCRELEASE); cp -R bin docker-latest; tar -f ../docker-latest.tgz -zv -c docker-latest
-clean:
- @rm -rf $(dir $(DOCKER_BIN))
-ifeq ($(GOPATH), $(BUILD_DIR))
- @rm -rf $(BUILD_DIR)
-else ifneq ($(DOCKER_DIR), $(realpath $(DOCKER_DIR)))
- @rm -f $(DOCKER_DIR)
-endif
-
-test:
- # Copy docker source and dependencies for testing
- rm -rf ${BUILD_SRC}; mkdir -p ${BUILD_PATH}
- tar --exclude=${BUILD_SRC} -cz . | tar -xz -C ${BUILD_PATH}
- GOPATH=${CURDIR}/${BUILD_SRC} go get -d
- # Do the test
- sudo -E GOPATH=${CURDIR}/${BUILD_SRC} CGO_ENABLED=0 go test ${GO_OPTIONS}
-
-testall: all
- @(cd $(DOCKER_DIR); CGO_ENABLED=0 sudo -E go test ./... $(GO_OPTIONS))
-
-fmt:
- @gofmt -s -l -w .
-
-hack:
- cd $(CURDIR)/hack && vagrant up
-
-ssh-dev:
- cd $(CURDIR)/hack && vagrant ssh
diff --git a/README.md b/README.md
index ed27091..89767a9 100644
--- a/README.md
+++ b/README.md
@@ -166,8 +166,12 @@
Installing from source
----------------------
-1. Make sure you have a [Go language](http://golang.org/doc/install)
-compiler >= 1.1 and [git](http://git-scm.com) installed.
+1. Install Dependencies
+ * [Go language 1.1.x](http://golang.org/doc/install)
+ * [git](http://git-scm.com)
+ * [lxc](http://lxc.sourceforge.net)
+ * [aufs-tools](http://aufs.sourceforge.net)
+
2. Checkout the source code
```bash
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..aaa0831
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.5.3-dev
diff --git a/api.go b/api.go
index 18f42c4..41edc4f 100644
--- a/api.go
+++ b/api.go
@@ -526,7 +526,7 @@
out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
}
- if !srv.runtime.capabilities.IPv4Forwarding {
+ if srv.runtime.capabilities.IPv4ForwardingDisabled {
log.Println("Warning: IPv4 forwarding is disabled.")
out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.")
}
diff --git a/auth/auth.go b/auth/auth.go
index 003a6e7..9131487 100644
--- a/auth/auth.go
+++ b/auth/auth.go
@@ -76,7 +76,7 @@
configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath}
confFile := path.Join(rootPath, CONFIGFILE)
if _, err := os.Stat(confFile); err != nil {
- return &configFile, ErrConfigFileMissing
+ return &configFile, nil //missing file is not an error
}
b, err := ioutil.ReadFile(confFile)
if err != nil {
@@ -86,13 +86,13 @@
if err := json.Unmarshal(b, &configFile.Configs); err != nil {
arr := strings.Split(string(b), "\n")
if len(arr) < 2 {
- return nil, fmt.Errorf("The Auth config file is empty")
+ return &configFile, fmt.Errorf("The Auth config file is empty")
}
authConfig := AuthConfig{}
origAuth := strings.Split(arr[0], " = ")
authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
if err != nil {
- return nil, err
+ return &configFile, err
}
origEmail := strings.Split(arr[1], " = ")
authConfig.Email = origEmail[1]
@@ -101,7 +101,7 @@
for k, authConfig := range configFile.Configs {
authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
if err != nil {
- return nil, err
+ return &configFile, err
}
authConfig.Auth = ""
configFile.Configs[k] = authConfig
diff --git a/buildfile.go b/buildfile.go
index 5a26626..64c4503 100644
--- a/buildfile.go
+++ b/buildfile.go
@@ -197,6 +197,11 @@
return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
}
+func (b *buildFile) CmdUser(args string) error {
+ b.config.User = args
+ return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
+}
+
func (b *buildFile) CmdInsert(args string) error {
return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
}
@@ -222,6 +227,11 @@
return nil
}
+func (b *buildFile) CmdWorkdir(workdir string) error {
+ b.config.WorkingDir = workdir
+ return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+}
+
func (b *buildFile) CmdVolume(args string) error {
if args == "" {
return fmt.Errorf("Volume cannot be empty")
diff --git a/buildfile_test.go b/buildfile_test.go
index d89c40d..1498616 100644
--- a/buildfile_test.go
+++ b/buildfile_test.go
@@ -270,6 +270,17 @@
}
}
+func TestBuildUser(t *testing.T) {
+ img := buildImage(testContextTemplate{`
+ from {IMAGE}
+ user dockerio
+ `, nil, nil}, t, nil, true)
+
+ if img.Config.User != "dockerio" {
+ t.Fail()
+ }
+}
+
func TestBuildEnv(t *testing.T) {
img := buildImage(testContextTemplate{`
from {IMAGE}
diff --git a/commands.go b/commands.go
index d0a1c6b..d90d0eb 100644
--- a/commands.go
+++ b/commands.go
@@ -27,10 +27,9 @@
"unicode"
)
-const VERSION = "0.5.3-dev"
-
var (
GITCOMMIT string
+ VERSION string
)
func (cli *DockerCli) getMethod(name string) (reflect.Method, bool) {
@@ -72,7 +71,7 @@
return nil
}
}
- help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[tcp://%s:%d]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTHTTPHOST, DEFAULTHTTPPORT)
+ help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET)
for _, command := range [][]string{
{"attach", "Attach to a running container"},
{"build", "Build a container from a Dockerfile"},
@@ -303,6 +302,8 @@
return nil
}
+ cli.LoadConfigFile()
+
var oldState *term.State
if *flUsername == "" || *flPassword == "" || *flEmail == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
@@ -498,6 +499,7 @@
}
if len(out.IndexServerAddress) != 0 {
+ cli.LoadConfigFile()
u := cli.configFile.Configs[out.IndexServerAddress].Username
if len(u) > 0 {
fmt.Fprintf(cli.out, "Username: %v\n", u)
@@ -838,12 +840,18 @@
return nil
}
+ cli.LoadConfigFile()
+
// If we're not using a custom registry, we know the restrictions
// applied to repository names and can warn the user in advance.
// Custom repositories can have different rules, and we must also
// allow pushing by image ID.
if len(strings.SplitN(name, "/", 2)) == 1 {
- return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", cli.configFile.Configs[auth.IndexServerAddress()].Username, name)
+ username := cli.configFile.Configs[auth.IndexServerAddress()].Username
+ if username == "" {
+ username = "<user>"
+ }
+ return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name)
}
v := url.Values{}
@@ -1402,6 +1410,13 @@
body, statusCode, err := cli.call("POST", "/containers/create", config)
//if image not found try to pull it
if statusCode == 404 {
+ _, tag := utils.ParseRepositoryTag(config.Image)
+ if tag == "" {
+ tag = DEFAULTTAG
+ }
+
+ fmt.Printf("Unable to find image '%s' (tag: %s) locally\n", config.Image, tag)
+
v := url.Values{}
repos, tag := utils.ParseRepositoryTag(config.Image)
v.Set("fromImage", repos)
@@ -1551,6 +1566,9 @@
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, -1, fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
+ }
return nil, -1, err
}
clientconn := httputil.NewClientConn(dial, nil)
@@ -1591,6 +1609,9 @@
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
+ }
return err
}
clientconn := httputil.NewClientConn(dial, nil)
@@ -1637,6 +1658,9 @@
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
+ }
return err
}
clientconn := httputil.NewClientConn(dial, nil)
@@ -1753,6 +1777,14 @@
return flags
}
+func (cli *DockerCli) LoadConfigFile() (err error) {
+ cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
+ if err != nil {
+ fmt.Fprintf(cli.err, "WARNING: %s\n", err)
+ }
+ return err
+}
+
func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
var (
isTerminal = false
@@ -1769,15 +1801,9 @@
if err == nil {
err = out
}
-
- configFile, e := auth.LoadConfig(os.Getenv("HOME"))
- if e != nil {
- fmt.Fprintf(err, "WARNING: %s\n", e)
- }
return &DockerCli{
proto: proto,
addr: addr,
- configFile: configFile,
in: in,
out: out,
err: err,
diff --git a/commands_test.go b/commands_test.go
index 9686849..25e4804 100644
--- a/commands_test.go
+++ b/commands_test.go
@@ -90,6 +90,69 @@
}
+// TestRunWorkdir checks that 'docker run -w' correctly sets a custom working directory
+func TestRunWorkdir(t *testing.T) {
+ stdout, stdoutPipe := io.Pipe()
+
+ cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ defer cleanup(globalRuntime)
+
+ c := make(chan struct{})
+ go func() {
+ defer close(c)
+ if err := cli.CmdRun("-w", "/foo/bar", unitTestImageID, "pwd"); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ setTimeout(t, "Reading command output time out", 2*time.Second, func() {
+ cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmdOutput != "/foo/bar\n" {
+ t.Fatalf("'pwd' should display '%s', not '%s'", "/foo/bar\n", cmdOutput)
+ }
+ })
+
+ setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+ <-c
+ })
+
+}
+
+// TestRunWorkdirExists checks that 'docker run -w' correctly sets a custom working directory, even if it exists
+func TestRunWorkdirExists(t *testing.T) {
+ stdout, stdoutPipe := io.Pipe()
+
+ cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ defer cleanup(globalRuntime)
+
+ c := make(chan struct{})
+ go func() {
+ defer close(c)
+ if err := cli.CmdRun("-w", "/proc", unitTestImageID, "pwd"); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ setTimeout(t, "Reading command output time out", 2*time.Second, func() {
+ cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cmdOutput != "/proc\n" {
+ t.Fatalf("'pwd' should display '%s', not '%s'", "/proc\n", cmdOutput)
+ }
+ })
+
+ setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+ <-c
+ })
+
+}
+
+
func TestRunExit(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
diff --git a/container.go b/container.go
index 4d38d49..7de3539 100644
--- a/container.go
+++ b/container.go
@@ -2,6 +2,7 @@
import (
"encoding/json"
+ "errors"
"flag"
"fmt"
"github.com/dotcloud/docker/term"
@@ -76,6 +77,7 @@
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumesFrom string
+ WorkingDir string
Entrypoint []string
NetworkDisabled bool
Privileged bool
@@ -84,6 +86,7 @@
type HostConfig struct {
Binds []string
ContainerIDFile string
+ LxcConf []KeyValuePair
}
type BindMap struct {
@@ -92,6 +95,15 @@
Mode string
}
+var (
+ ErrInvaidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.")
+)
+
+type KeyValuePair struct {
+ Key string
+ Value string
+}
+
func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
@@ -100,6 +112,7 @@
}
flHostname := cmd.String("h", "", "Container host name")
+ flWorkingDir := cmd.String("w", "", "Working directory inside the container")
flUser := cmd.String("u", "", "Username or UID")
flDetach := cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
flAttach := NewAttachOpts()
@@ -133,12 +146,18 @@
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
+ var flLxcOpts ListOpts
+ cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
+
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
if *flDetach && len(flAttach) > 0 {
return nil, nil, cmd, fmt.Errorf("Conflicting options: -a and -d")
}
+ if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
+ return nil, nil, cmd, ErrInvaidWorikingDirectory
+ }
// If neither -d or -a are set, attach to everything by default
if len(flAttach) == 0 && !*flDetach {
if !*flDetach {
@@ -177,6 +196,12 @@
entrypoint = []string{*flEntrypoint}
}
+ var lxcConf []KeyValuePair
+ lxcConf, err := parseLxcConfOpts(flLxcOpts)
+ if err != nil {
+ return nil, nil, cmd, err
+ }
+
config := &Config{
Hostname: *flHostname,
PortSpecs: flPorts,
@@ -197,10 +222,12 @@
VolumesFrom: *flVolumesFrom,
Entrypoint: entrypoint,
Privileged: *flPrivileged,
+ WorkingDir: *flWorkingDir,
}
hostConfig := &HostConfig{
Binds: binds,
ContainerIDFile: *flContainerIDFile,
+ LxcConf: lxcConf,
}
if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
@@ -304,7 +331,7 @@
return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
}
-func (container *Container) generateLXCConfig() error {
+func (container *Container) generateLXCConfig(hostConfig *HostConfig) error {
fo, err := os.Create(container.lxcConfigPath())
if err != nil {
return err
@@ -313,6 +340,11 @@
if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
return err
}
+ if hostConfig != nil {
+ if err := LxcHostConfigTemplateCompiled.Execute(fo, hostConfig); err != nil {
+ return err
+ }
+ }
return nil
}
@@ -509,7 +541,7 @@
container.State.Lock()
defer container.State.Unlock()
- if len(hostConfig.Binds) == 0 {
+ if len(hostConfig.Binds) == 0 && len(hostConfig.LxcConf) == 0 {
hostConfig, _ = container.ReadHostConfig()
}
@@ -537,7 +569,7 @@
container.Config.MemorySwap = -1
}
- if !container.runtime.capabilities.IPv4Forwarding {
+ if container.runtime.capabilities.IPv4ForwardingDisabled {
log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
}
@@ -634,7 +666,7 @@
}
}
- if err := container.generateLXCConfig(); err != nil {
+ if err := container.generateLXCConfig(hostConfig); err != nil {
return err
}
@@ -666,6 +698,18 @@
"-e", "container=lxc",
"-e", "HOSTNAME="+container.Config.Hostname,
)
+ if container.Config.WorkingDir != "" {
+ workingDir := path.Clean(container.Config.WorkingDir)
+ utils.Debugf("[working dir] working dir is %s", workingDir)
+
+ if err := os.MkdirAll(path.Join(container.RootfsPath(), workingDir), 0755); err != nil {
+ return err
+ }
+
+ params = append(params,
+ "-w", workingDir,
+ )
+ }
for _, elem := range container.Config.Env {
params = append(params, "-e", elem)
diff --git a/container_test.go b/container_test.go
index 3752615..ba48ceb 100644
--- a/container_test.go
+++ b/container_test.go
@@ -1070,7 +1070,7 @@
t.Fatal(err)
}
defer runtime.Destroy(container)
- container.generateLXCConfig()
+ container.generateLXCConfig(nil)
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
@@ -1078,6 +1078,36 @@
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
+func TestCustomLxcConfig(t *testing.T) {
+ runtime := mkRuntime(t)
+ defer nuke(runtime)
+ container, err := NewBuilder(runtime).Create(&Config{
+ Image: GetTestImage(runtime).ID,
+ Cmd: []string{"/bin/true"},
+
+ Hostname: "foobar",
+ },
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer runtime.Destroy(container)
+ hostConfig := &HostConfig{LxcConf: []KeyValuePair{
+ {
+ Key: "lxc.utsname",
+ Value: "docker",
+ },
+ {
+ Key: "lxc.cgroup.cpuset.cpus",
+ Value: "0,1",
+ },
+ }}
+
+ container.generateLXCConfig(hostConfig)
+ grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
+ grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
+}
+
func BenchmarkRunSequencial(b *testing.B) {
runtime := mkRuntime(b)
defer nuke(runtime)
diff --git a/contrib/install.sh b/contrib/install.sh
index 04340e2..3cf7169 100755
--- a/contrib/install.sh
+++ b/contrib/install.sh
@@ -35,10 +35,10 @@
fi
fi
-echo "Downloading docker binary and uncompressing into /usr/local/bin..."
-curl -s https://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-latest.tgz |
-tar -C /usr/local/bin --strip-components=1 -zxf- \
-docker-latest/docker
+echo "Downloading docker binary to /usr/local/bin..."
+curl -s https://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-latest \
+ > /usr/local/bin/docker
+chmod +x /usr/local/bin/docker
if [ -f /etc/init/dockerd.conf ]
then
@@ -50,7 +50,7 @@
start on filesystem or runlevel [2345]
stop on runlevel [!2345]
respawn
-exec env LANG="en_US.UTF-8" /usr/local/bin/docker -d
+exec /usr/local/bin/docker -d
EOF
fi
diff --git a/docker/docker.go b/docker/docker.go
index a48865b..6ac0c93 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -16,6 +16,7 @@
var (
GITCOMMIT string
+ VERSION string
)
func main() {
@@ -25,6 +26,7 @@
return
}
// FIXME: Switch d and D ? (to be more sshd like)
+ flVersion := flag.Bool("v", false, "Print version information and quit")
flDaemon := flag.Bool("d", false, "Daemon mode")
flDebug := flag.Bool("D", false, "Debug mode")
flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
@@ -36,6 +38,10 @@
flHosts := docker.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
flag.Parse()
+ if *flVersion {
+ showVersion()
+ return
+ }
if len(flHosts) > 1 {
flHosts = flHosts[1:] //trick to display a nice default value in the usage
}
@@ -52,6 +58,7 @@
os.Setenv("DEBUG", "1")
}
docker.GITCOMMIT = GITCOMMIT
+ docker.VERSION = VERSION
if *flDaemon {
if flag.NArg() != 0 {
flag.Usage()
@@ -74,6 +81,10 @@
}
}
+func showVersion() {
+ fmt.Printf("Docker version %s, build %s\n", VERSION, GITCOMMIT)
+}
+
func createPidFile(pidfile string) error {
if pidString, err := ioutil.ReadFile(pidfile); err == nil {
pid, err := strconv.Atoi(string(pidString))
diff --git a/docs/sources/api/docker_remote_api_v1.4.rst b/docs/sources/api/docker_remote_api_v1.4.rst
index adb0c1d..d512de9 100644
--- a/docs/sources/api/docker_remote_api_v1.4.rst
+++ b/docs/sources/api/docker_remote_api_v1.4.rst
@@ -129,7 +129,9 @@
"Dns":null,
"Image":"base",
"Volumes":{},
- "VolumesFrom":""
+ "VolumesFrom":"",
+ "WorkingDir":""
+
}
**Example response**:
@@ -195,7 +197,9 @@
"Dns": null,
"Image": "base",
"Volumes": {},
- "VolumesFrom": ""
+ "VolumesFrom": "",
+ "WorkingDir":""
+
},
"State": {
"Running": false,
@@ -352,7 +356,8 @@
Content-Type: application/json
{
- "Binds":["/tmp:/tmp"]
+ "Binds":["/tmp:/tmp"],
+ "LxcConf":{"lxc.utsname":"docker"}
}
**Example response**:
@@ -746,7 +751,8 @@
,"Dns":null,
"Image":"base",
"Volumes":null,
- "VolumesFrom":""
+ "VolumesFrom":"",
+ "WorkingDir":""
},
"Size": 6824592
}
diff --git a/docs/sources/commandline/cli.rst b/docs/sources/commandline/cli.rst
index 6d96886..9f8c296 100644
--- a/docs/sources/commandline/cli.rst
+++ b/docs/sources/commandline/cli.rst
@@ -15,7 +15,7 @@
$ sudo docker
Usage: docker [OPTIONS] COMMAND [arg...]
- -H=[tcp://127.0.0.1:4243]: tcp://host:port to bind/connect to or unix://path/to/socket to use
+ -H=[unix:///var/run/docker.sock]: tcp://host:port to bind/connect to or unix://path/to/socket to use
A self-sufficient runtime for linux containers.
diff --git a/docs/sources/commandline/command/attach.rst b/docs/sources/commandline/command/attach.rst
index 4c4c189..12ed802 100644
--- a/docs/sources/commandline/command/attach.rst
+++ b/docs/sources/commandline/command/attach.rst
@@ -10,4 +10,50 @@
Usage: docker attach CONTAINER
- Attach to a running container
+ Attach to a running container.
+
+You can detach from the container again (and leave it running) with
+``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
+the Docker client when it quits.
+
+To stop a container, use ``docker stop``
+
+To kill the container, use ``docker kill``
+
+Examples:
+---------
+
+.. code-block:: bash
+
+ $ ID=$(sudo docker run -d ubuntu /usr/bin/top -b)
+ $ sudo docker attach $ID
+ top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
+ Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
+ Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
+ Mem: 373572k total, 355560k used, 18012k free, 27872k buffers
+ Swap: 786428k total, 0k used, 786428k free, 221740k cached
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top
+
+ top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
+ Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
+ Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
+ Mem: 373572k total, 355244k used, 18328k free, 27872k buffers
+ Swap: 786428k total, 0k used, 786428k free, 221776k cached
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top
+
+
+ top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05
+ Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
+ Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
+ Mem: 373572k total, 355780k used, 17792k free, 27880k buffers
+ Swap: 786428k total, 0k used, 786428k free, 221776k cached
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top
+ ^C$
+ $ sudo docker stop $ID
+
diff --git a/docs/sources/commandline/command/run.rst b/docs/sources/commandline/command/run.rst
index 46b206a..cd28366 100644
--- a/docs/sources/commandline/command/run.rst
+++ b/docs/sources/commandline/command/run.rst
@@ -29,7 +29,8 @@
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "host-dir" is missing, then docker creates a new volume.
-volumes-from="": Mount all volumes from the given container.
-entrypoint="": Overwrite the default entrypoint set by the image.
-
+ -w="": Working directory inside the container
+ -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
Examples
--------
@@ -62,3 +63,22 @@
everything that the host can do. This flag exists to allow special
use-cases, like running Docker within Docker.
+.. code-block:: bash
+
+ docker run -w /path/to/dir/ -i -t ubuntu pwd
+
+The ``-w`` option runs the command inside the given directory,
+here ``/path/to/dir/``. If the path does not exist it is created inside the
+container.
+
+.. code-block:: bash
+
+ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
+
+The ``-v`` flag mounts the current working directory into the container.
+The ``-w`` option runs the command inside the current
+working directory, by changing into the directory given by the value
+returned by ``pwd``. So this combination executes the command
+using the container, but inside the current working directory.
+
+
diff --git a/docs/sources/examples/index.rst b/docs/sources/examples/index.rst
index 58da18e..2664b95 100644
--- a/docs/sources/examples/index.rst
+++ b/docs/sources/examples/index.rst
@@ -21,3 +21,4 @@
running_ssh_service
couchdb_data_volumes
postgresql_service
+ mongodb
diff --git a/docs/sources/examples/mongodb.rst b/docs/sources/examples/mongodb.rst
new file mode 100644
index 0000000..e351b9b
--- /dev/null
+++ b/docs/sources/examples/mongodb.rst
@@ -0,0 +1,98 @@
+:title: Building a Docker Image with MongoDB
+:description: How to build a Docker image with MongoDB pre-installed
+:keywords: docker, example, package installation, networking, mongodb
+
+.. _mongodb_image:
+
+Building an Image with MongoDB
+==============================
+
+.. include:: example_header.inc
+
+The goal of this example is to show how you can build your own
+docker images with MongoDB preinstalled. We will do that by
+constructing a Dockerfile that downloads a base image, adds an
+apt source and installs the database software on Ubuntu.
+
+Creating a ``Dockerfile``
++++++++++++++++++++++++++
+
+Create an empty file called ``Dockerfile``:
+
+.. code-block:: bash
+
+ touch Dockerfile
+
+Next, define the parent image you want to use to build your own image on top of.
+Here, we’ll use `Ubuntu <https://index.docker.io/_/ubuntu/>`_ (tag: ``latest``)
+available on the `docker index`_:
+
+.. code-block:: bash
+
+ FROM ubuntu:latest
+
+Since we want to be running the latest version of MongoDB we'll need to add the
+10gen repo to our apt sources list.
+
+.. code-block:: bash
+
+ # Add 10gen official apt source to the sources list
+ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+ RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
+
+Then, we don't want Ubuntu to complain about init not being available so we'll
+divert /sbin/initctl to /bin/true so it thinks everything is working.
+
+.. code-block:: bash
+
+ # Hack for initctl not being available in Ubuntu
+ RUN dpkg-divert --local --rename --add /sbin/initctl
+ RUN ln -s /bin/true /sbin/initctl
+
+Afterwards we'll be able to update our apt repositories and install MongoDB
+
+.. code-block:: bash
+
+ # Install MongoDB
+ RUN apt-get update
+ RUN apt-get install mongodb-10gen
+
+To run MongoDB we'll have to create the default data directory (because we want it to
+run without needing to provide a special configuration file)
+
+.. code-block:: bash
+
+ # Create the MongoDB data directory
+ RUN mkdir -p /data/db
+
+Finally, we'll expose the standard port that MongoDB runs on (27017)
+
+.. code-block:: bash
+
+ EXPOSE 27017
+
+Now, lets build the image which will go through the ``Dockerfile`` we made and
+run all of the commands.
+
+.. code-block:: bash
+
+ docker build -t <yourname>/mongodb .
+
+Now you should be able to run ``mongod`` as a daemon and be able to connect on
+the local port!
+
+.. code-block:: bash
+
+ # Regular style
+ MONGO_ID=$(docker run -d <yourname>/mongodb mongod)
+
+ # Lean and mean
+ MONGO_ID=$(docker run -d <yourname>/mongodb mongod --noprealloc --smallfiles)
+
+ # Check the logs out
+ docker logs $MONGO_ID
+
+ # Connect and play around
+ mongo --port <port you get from `docker ps`>
+
+Sweet!
diff --git a/docs/sources/examples/postgresql_service.rst b/docs/sources/examples/postgresql_service.rst
index 96b19c8..27543ec 100644
--- a/docs/sources/examples/postgresql_service.rst
+++ b/docs/sources/examples/postgresql_service.rst
@@ -84,11 +84,11 @@
host all all 0.0.0.0/0 md5
Additionaly, inside ``/etc/postgresql/9.2/main/postgresql.conf``
-uncomment ``listen_address`` so it is as follows:
+uncomment ``listen_addresses`` so it is as follows:
.. code-block:: bash
- listen_address='*'
+ listen_addresses='*'
*Note:* this PostgreSQL setup is for development only purposes. Refer
to PostgreSQL documentation how to fine-tune these settings so that it
diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst
index 78f1bf9..eef6740 100644
--- a/docs/sources/use/basics.rst
+++ b/docs/sources/use/basics.rst
@@ -41,7 +41,7 @@
The ``docker`` daemon always runs as root, and since ``docker``
version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP
port. By default that Unix socket is owned by the user *root*, and so,
-by default, you can access it with ``sudo``.
+by default, you can access it with ``sudo``.
Starting in version 0.5.3, if you create a Unix group called *docker*
and add users to it, then the ``docker`` daemon will make the
@@ -50,6 +50,19 @@
if you run the ``docker`` client as a user in the *docker* group then
you don't need to add ``sudo`` to all the client commands.
+.. code-block:: bash
+
+ # Add the docker group
+ sudo groupadd docker
+
+ # Add the ubuntu user to the docker group
+ # You may have to logout and log back in again for
+ # this to take effect
+ sudo gpasswd -a ubuntu docker
+
+ # Restart the docker daemon
+ sudo service docker restart
+
Bind Docker to another host/port or a Unix socket
-------------------------------------------------
diff --git a/docs/sources/use/builder.rst b/docs/sources/use/builder.rst
index 85d9642..cc85bef 100644
--- a/docs/sources/use/builder.rst
+++ b/docs/sources/use/builder.rst
@@ -102,11 +102,50 @@
3.4 CMD
-------
- ``CMD <command>``
+CMD has three forms:
-The ``CMD`` instruction sets the command to be executed when running
-the image. This is functionally equivalent to running ``docker commit
--run '{"Cmd": <command>}'`` outside the builder.
+* ``CMD ["executable","param1","param2"]`` (like an *exec*, preferred form)
+* ``CMD ["param1","param2"]`` (as *default parameters to ENTRYPOINT*)
+* ``CMD command param1 param2`` (as a *shell*)
+
+There can only be one CMD in a Dockerfile. If you list more than one
+CMD then only the last CMD will take effect.
+
+**The main purpose of a CMD is to provide defaults for an executing
+container.** These defaults can include an executable, or they can
+omit the executable, in which case you must specify an ENTRYPOINT as
+well.
+
+When used in the shell or exec formats, the ``CMD`` instruction sets
+the command to be executed when running the image. This is
+functionally equivalent to running ``docker commit -run '{"Cmd":
+<command>}'`` outside the builder.
+
+If you use the *shell* form of the CMD, then the ``<command>`` will
+execute in ``/bin/sh -c``:
+
+.. code-block:: bash
+
+ FROM ubuntu
+ CMD echo "This is a test." | wc -
+
+If you want to **run your** ``<command>`` **without a shell** then you
+must express the command as a JSON array and give the full path to the
+executable. **This array form is the preferred format of CMD.** Any
+additional parameters must be individually expressed as strings in the
+array:
+
+.. code-block:: bash
+
+ FROM ubuntu
+ CMD ["/usr/bin/wc","--help"]
+
+If you would like your container to run the same executable every
+time, then you should consider using ``ENTRYPOINT`` in combination
+with ``CMD``. See :ref:`entrypoint_def`.
+
+If the user specifies arguments to ``docker run`` then they will
+override the default specified in CMD.
.. note::
Don't confuse ``RUN`` with ``CMD``. ``RUN`` actually runs a
@@ -186,16 +225,55 @@
directories in its path. All new files and directories are created
with mode 0755, uid and gid 0.
+.. _entrypoint_def:
+
3.8 ENTRYPOINT
--------------
- ``ENTRYPOINT ["/bin/echo"]``
+ENTRYPOINT has two forms:
-The ``ENTRYPOINT`` instruction adds an entry command that will not be
-overwritten when arguments are passed to docker run, unlike the
+* ``ENTRYPOINT ["executable", "param1", "param2"]`` (like an *exec*,
+ preferred form)
+* ``ENTRYPOINT command param1 param2`` (as a *shell*)
+
+There can only be one ``ENTRYPOINT`` in a Dockerfile. If you have more
+than one ``ENTRYPOINT``, then only the last one in the Dockerfile will
+have an effect.
+
+An ``ENTRYPOINT`` helps you to configure a container that you can run
+as an executable. That is, when you specify an ``ENTRYPOINT``, then
+the whole container runs as if it was just that executable.
+
+The ``ENTRYPOINT`` instruction adds an entry command that will **not**
+be overwritten when arguments are passed to ``docker run``, unlike the
behavior of ``CMD``. This allows arguments to be passed to the
-entrypoint. i.e. ``docker run <image> -d`` will pass the "-d" argument
-to the entrypoint.
+entrypoint. i.e. ``docker run <image> -d`` will pass the "-d"
+argument to the ENTRYPOINT.
+
+You can specify parameters either in the ENTRYPOINT JSON array (as in
+"like an exec" above), or by using a CMD statement. Parameters in the
+ENTRYPOINT will not be overridden by the ``docker run`` arguments, but
+parameters specified via CMD will be overridden by ``docker run``
+arguments.
+
+Like a ``CMD``, you can specify a plain string for the ENTRYPOINT and
+it will execute in ``/bin/sh -c``:
+
+.. code-block:: bash
+
+ FROM ubuntu
+ ENTRYPOINT wc -l -
+
+For example, that Dockerfile's image will *always* take stdin as input
+("-") and print the number of lines ("-l"). If you wanted to make
+this optional but default, you could use a CMD:
+
+.. code-block:: bash
+
+ FROM ubuntu
+ CMD ["-l", "-"]
+ ENTRYPOINT ["/usr/bin/wc"]
+
3.9 VOLUME
----------
@@ -205,6 +283,23 @@
The ``VOLUME`` instruction will add one or more new volumes to any
container created from the image.
+3.10 USER
+---------
+
+ ``USER daemon``
+
+The ``USER`` instruction sets the username or UID to use when running
+the image.
+
+3.11 WORKDIR
+------------
+
+ ``WORKDIR /path/to/workdir``
+
+The ``WORKDIR`` instruction sets the working directory in which
+the command given by ``CMD`` is executed.
+
+
4. Dockerfile Examples
======================
diff --git a/graph.go b/graph.go
index 606a683..c54725f 100644
--- a/graph.go
+++ b/graph.go
@@ -323,9 +323,9 @@
return
}
if children, exists := byParent[parent.ID]; exists {
- byParent[parent.ID] = []*Image{image}
- } else {
byParent[parent.ID] = append(children, image)
+ } else {
+ byParent[parent.ID] = []*Image{image}
}
})
return byParent, err
diff --git a/graph_test.go b/graph_test.go
index 2898fcc..32fb0ef 100644
--- a/graph_test.go
+++ b/graph_test.go
@@ -234,6 +234,45 @@
assertNImages(graph, t, 1)
}
+func TestByParent(t *testing.T) {
+ archive1, _ := fakeTar()
+ archive2, _ := fakeTar()
+ archive3, _ := fakeTar()
+
+ graph := tempGraph(t)
+ defer os.RemoveAll(graph.Root)
+ parentImage := &Image{
+ ID: GenerateID(),
+ Comment: "parent",
+ Created: time.Now(),
+ Parent: "",
+ }
+ childImage1 := &Image{
+ ID: GenerateID(),
+ Comment: "child1",
+ Created: time.Now(),
+ Parent: parentImage.ID,
+ }
+ childImage2 := &Image{
+ ID: GenerateID(),
+ Comment: "child2",
+ Created: time.Now(),
+ Parent: parentImage.ID,
+ }
+ _ = graph.Register(nil, archive1, parentImage)
+ _ = graph.Register(nil, archive2, childImage1)
+ _ = graph.Register(nil, archive3, childImage2)
+
+ byParent, err := graph.ByParent()
+ if err != nil {
+ t.Fatal(err)
+ }
+ numChildren := len(byParent[parentImage.ID])
+ if numChildren != 2 {
+ t.Fatalf("Expected 2 children, found %d", numChildren)
+ }
+}
+
func assertNImages(graph *Graph, t *testing.T, n int) {
if images, err := graph.All(); err != nil {
t.Fatal(err)
diff --git a/hack/RELEASE.md b/hack/RELEASE.md
deleted file mode 100644
index 5cf4077..0000000
--- a/hack/RELEASE.md
+++ /dev/null
@@ -1,133 +0,0 @@
-## A maintainer's guide to releasing Docker
-
-So you're in charge of a docker release? Cool. Here's what to do.
-
-If your experience deviates from this document, please document the changes to keep it
-up-to-date.
-
-
-### 1. Pull from master and create a release branch
-
- ```bash
- $ git checkout master
- $ git pull
- $ git checkout -b bump_$VERSION
- ```
-
-### 2. Update CHANGELOG.md
-
- You can run this command for reference:
-
- ```bash
- LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1)
- git log $LAST_VERSION..HEAD
- ```
-
- Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
-
- * BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix,
- new feature or upgrade, respectively.
-
- * CATEGORY should describe which part of the project is affected.
- Valid categories are:
- * Builder
- * Documentation
- * Hack
- * Packaging
- * Remote API
- * Runtime
-
- * DESCRIPTION: a concise description of the change that is relevant to the end-user,
- using the present tense.
- Changes should be described in terms of how they affect the user, for example "new feature
- X which allows Y", "fixed bug which caused X", "increased performance of Y".
-
- EXAMPLES:
-
- ```
- + Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
- * Runtime: improve detection of kernel version
- - Remote API: fix a bug in the optional unix socket transport
- ```
-
-### 3. Change VERSION in commands.go
-
-### 4. Run all tests
-
- ```bash
- $ make test
- ```
-
-### 5. Commit and create a pull request
-
- ```bash
- $ git add commands.go CHANGELOG.md
- $ git commit -m "Bump version to $VERSION"
- $ git push origin bump_$VERSION
- ```
-
-### 6. Get 2 other maintainers to validate the pull request
-
-### 7. Merge the pull request and apply tags
-
- ```bash
- $ git checkout master
- $ git merge bump_$VERSION
- $ git tag -a v$VERSION # Don't forget the v!
- $ git tag -f -a latest
- $ git push
- $ git push --tags
- ```
-
-### 8. Publish binaries
-
- To run this you will need access to the release credentials.
- Get them from [the infrastructure maintainers](https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS).
-
- ```bash
- $ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers
- $ BUILD=$(docker run -d -e RELEASE_PPA=0 $RELEASE_IMAGE)
- ```
-
- This will do 2 things:
-
- * It will build and upload the binaries on http://get.docker.io
- * It will *test* the release on our Ubuntu PPA (a PPA is a community repository for ubuntu packages)
-
- Wait for the build to complete.
-
- ```bash
- $ docker wait $BUILD # This should print 0. If it doesn't, your build failed.
- ```
-
- Check that the output looks OK. Here's an example of a correct output:
-
- ```bash
- $ docker logs 2>&1 b4e7c8299d73 | grep -e 'Public URL' -e 'Successfully uploaded'
- Public URL of the object is: http://get.docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-v0.4.7.tgz
- Public URL of the object is: http://get.docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-latest.tgz
- Successfully uploaded packages.
- ```
-
- If you don't see 3 lines similar to this, something might be wrong. Check the full logs and try again.
-
-
-### 9. Publish Ubuntu packages
-
- If everything went well in the previous step, you can finalize the release by submitting the Ubuntu
- packages.
-
- ```bash
- $ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers
- $ docker run -e RELEASE_PPA=1 $RELEASE_IMAGE
- ```
-
- If that goes well, Ubuntu Precise package is in its way. It will take anywhere from 0.5 to 30 hours
- for the builders to complete their job depending on builder demand at this time. At this point, Quantal
- and Raring packages need to be created using the Launchpad interface:
- https://launchpad.net/~dotcloud/+archive/lxc-docker/+packages
-
- Notify [the packager maintainers](https://github.com/dotcloud/docker/blob/master/packaging/MAINTAINERS)
- who will ensure PPA is ready.
-
- Congratulations! You're done
diff --git a/hack/dockerbuilder/Dockerfile b/hack/dockerbuilder/Dockerfile
deleted file mode 100644
index 496ee45..0000000
--- a/hack/dockerbuilder/Dockerfile
+++ /dev/null
@@ -1,36 +0,0 @@
-# DESCRIPTION Build a container capable of producing official binary and
-# PPA packages and uploading them to S3 and Launchpad
-# VERSION 1.2
-# DOCKER_VERSION 0.4
-# AUTHOR Solomon Hykes <solomon@dotcloud.com>
-# Daniel Mizyrycki <daniel@dotcloud.net>
-# BUILD_CMD docker build -t dockerbuilder .
-# RUN_CMD docker run -e AWS_ID="$AWS_ID" -e AWS_KEY="$AWS_KEY" -e GPG_KEY="$GPG_KEY" -e PUBLISH_PPA="$PUBLISH_PPA" dockerbuilder
-#
-# ENV_VARIABLES AWS_ID, AWS_KEY: S3 credentials for uploading Docker binary and tarball
-# GPG_KEY: Signing key for docker package
-# PUBLISH_PPA: 0 for staging release, 1 for production release
-#
-from ubuntu:12.04
-maintainer Solomon Hykes <solomon@dotcloud.com>
-# Workaround the upstart issue
-run dpkg-divert --local --rename --add /sbin/initctl
-run ln -s /bin/true /sbin/initctl
-# Enable universe and gophers PPA
-run DEBIAN_FRONTEND=noninteractive apt-get install -y -q python-software-properties
-run add-apt-repository "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe"
-run add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu
-run apt-get update
-# Packages required to checkout, build and upload docker
-run DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd curl
-run curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz
-run tar -C /usr/local -xzf /go.tar.gz
-run echo "export PATH=/usr/local/go/bin:$PATH" > /.bashrc
-run echo "export PATH=/usr/local/go/bin:$PATH" > /.bash_profile
-run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git build-essential
-# Packages required to build an ubuntu package
-run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang-stable debhelper autotools-dev devscripts
-# Copy dockerbuilder files into the container
-add . /src
-run cp /src/dockerbuilder /usr/local/bin/ && chmod +x /usr/local/bin/dockerbuilder
-cmd ["dockerbuilder"]
diff --git a/hack/dockerbuilder/MAINTAINERS b/hack/dockerbuilder/MAINTAINERS
deleted file mode 100644
index 5dfc881..0000000
--- a/hack/dockerbuilder/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)
diff --git a/hack/dockerbuilder/dockerbuilder b/hack/dockerbuilder/dockerbuilder
deleted file mode 100644
index 9fa05ce..0000000
--- a/hack/dockerbuilder/dockerbuilder
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-set -x
-set -e
-
-export PATH=/usr/local/go/bin:$PATH
-
-PACKAGE=github.com/dotcloud/docker
-
-if [ $# -gt 1 ]; then
- echo "Usage: $0 [REVISION]"
- exit 1
-fi
-
-export REVISION=$1
-
-if [ -z "$AWS_ID" -o -z "$AWS_KEY" ]; then
- echo "Warning: either AWS_ID or AWS_KEY environment variable not set. Won't upload to S3."
-else
- /bin/echo -e "[default]\naccess_key = $AWS_ID\nsecret_key = $AWS_KEY\n" > /.s3cfg
-fi
-
-if [ -z "$GPG_KEY" ]; then
- echo "Warning: environment variable GPG_KEY is not set. Ubuntu package upload will not succeed."
- NO_UBUNTU=1
-fi
-
-rm -fr docker-release
-git clone https://github.com/dotcloud/docker docker-release
-cd docker-release
-if [ -z "$REVISION" ]; then
- make release
-else
- make release RELEASE_VERSION=$REVISION
-fi
-
-# Remove credentials from container
-rm -f /.s3cfg
-
-if [ -z "$NO_UBUNTU" ]; then
- export PATH=`echo $PATH | sed 's#/usr/local/go/bin:##g'`
- (cd packaging/ubuntu && make ubuntu)
-fi
diff --git a/hack/release/README.md b/hack/release/README.md
new file mode 100644
index 0000000..f01bb3a
--- /dev/null
+++ b/hack/release/README.md
@@ -0,0 +1,106 @@
+## A maintainer's guide to releasing Docker
+
+So you're in charge of a Docker release? Cool. Here's what to do.
+
+If your experience deviates from this document, please document the changes
+to keep it up-to-date.
+
+
+### 1. Pull from master and create a release branch
+
+```bash
+git checkout master
+git pull
+git checkout -b bump_$VERSION
+```
+
+### 2. Update CHANGELOG.md
+
+You can run this command for reference:
+
+```bash
+LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1)
+git log $LAST_VERSION..HEAD
+```
+
+Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
+
+* BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix,
+ new feature or upgrade, respectively.
+
+* CATEGORY should describe which part of the project is affected.
+ Valid categories are:
+ * Builder
+ * Documentation
+ * Hack
+ * Packaging
+ * Remote API
+ * Runtime
+
+* DESCRIPTION: a concise description of the change that is relevant to the
+ end-user, using the present tense. Changes should be described in terms
+ of how they affect the user, for example "new feature X which allows Y",
+ "fixed bug which caused X", "increased performance of Y".
+
+EXAMPLES:
+
+```
++ Builder: 'docker build -t FOO' applies the tag FOO to the newly built
+ container.
+* Runtime: improve detection of kernel version
+- Remote API: fix a bug in the optional unix socket transport
+```
+
+### 3. Change the contents of the VERSION file
+
+### 4. Run all tests
+
+```bash
+go test
+```
+
+### 5. Commit and create a pull request
+
+```bash
+git add CHANGELOG.md
+git commit -m "Bump version to $VERSION"
+git push origin bump_$VERSION
+```
+
+### 6. Get 2 other maintainers to validate the pull request
+
+### 7. Merge the pull request and apply tags
+
+```bash
+git checkout master
+git merge bump_$VERSION
+git tag -a v$VERSION # Don't forget the v!
+git tag -f -a latest
+git push
+git push --tags
+```
+
+### 8. Publish binaries
+
+To run this you will need access to the release credentials.
+Get them from [the infrastructure maintainers](
+https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS).
+
+```bash
+docker build -t releasedocker .
+docker run \
+ -e AWS_S3_BUCKET=get-nightly.docker.io \
+ -e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
+ -e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
+ -e GPG_PASSPHRASE=supersecretsesame \
+ releasedocker
+```
+
+It will build and upload the binaries to the specified bucket (you should
+use get-nightly.docker.io for general testing, and once everything is fine,
+switch to get.docker.io).
+
+
+### 9. Rejoice!
+
+Congratulations! You're done.
diff --git a/hack/release/make.sh b/hack/release/make.sh
new file mode 100755
index 0000000..a939583
--- /dev/null
+++ b/hack/release/make.sh
@@ -0,0 +1,177 @@
+#!/bin/sh
+
+# This script builds various binary artifacts from a checkout of the docker
+# source code.
+#
+# Requirements:
+# - The current directory should be a checkout of the docker source code
+# (http://github.com/dotcloud/docker). Whatever version is checked out
+# will be built.
+# - The VERSION file, at the root of the repository, should exist, and
+# will be used as Docker binary version and package version.
+# - The hash of the git commit will also be included in the Docker binary,
+# with the suffix -dirty if the repository isn't clean.
+# - The script is intended to be run as part of a docker build, as defined
+# in the Dockerfile at the root of the source. In other words:
+# DO NOT CALL THIS SCRIPT DIRECTLY.
+# - The right way to call this script is to invoke "docker build ." from
+# your checkout of the Docker repository.
+#
+
+set -e
+
+# We're a nice, sexy, little shell script, and people might try to run us;
+# but really, they shouldn't. We want to be in a container!
+RESOLVCONF=$(readlink --canonicalize /etc/resolv.conf)
+grep -q "$RESOLVCONF" /proc/mounts || {
+ echo "# I will only run within a container."
+ echo "# Try this instead:"
+ echo "docker build ."
+ exit 1
+}
+
+VERSION=$(cat ./VERSION)
+PKGVERSION="$VERSION"
+GITCOMMIT=$(git rev-parse --short HEAD)
+if test -n "$(git status --porcelain)"
+then
+ GITCOMMIT="$GITCOMMIT-dirty"
+ PKGVERSION="$PKGVERSION-$(date +%Y%m%d%H%M%S)-$GITCOMMIT"
+fi
+
+PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
+PACKAGE_URL="http://www.docker.io/"
+PACKAGE_MAINTAINER="docker@dotcloud.com"
+PACKAGE_DESCRIPTION="lxc-docker is a Linux container runtime
+Docker complements LXC with a high-level API which operates at the process
+level. It runs unix processes with strong guarantees of isolation and
+repeatability across servers.
+Docker is a great building block for automating distributed systems:
+large-scale web deployments, database clusters, continuous deployment systems,
+private PaaS, service-oriented architectures, etc."
+
+UPSTART_SCRIPT='description "Docker daemon"
+
+start on filesystem or runlevel [2345]
+stop on runlevel [!2345]
+
+respawn
+
+exec docker -d
+'
+
+# Each "bundle" is a different type of build artefact: static binary, Ubuntu
+# package, etc.
+
+# Build Docker as a static binary file
+bundle_binary() {
+ mkdir -p bundles/$VERSION/binary
+ go build -o bundles/$VERSION/binary/docker-$VERSION \
+ -ldflags "-X main.GITCOMMIT $GITCOMMIT -X main.VERSION $VERSION -d -w" \
+ ./docker
+}
+
+
+# Build Docker's test suite as a collection of binary files (one per
+# sub-package to test)
+bundle_test() {
+ mkdir -p bundles/$VERSION/test
+ for test_dir in $(find_test_dirs); do
+ test_binary=$(
+ cd $test_dir
+ go test -c -v -ldflags "-X main.GITCOMMIT $GITCOMMIT -X main.VERSION $VERSION -d -w" >&2
+ find . -maxdepth 1 -type f -name '*.test' -executable
+ )
+ cp $test_dir/$test_binary bundles/$VERSION/test/
+ done
+}
+
+# Build docker as an ubuntu package using FPM and REPREPRO (sue me).
+# bundle_binary must be called first.
+bundle_ubuntu() {
+ mkdir -p bundles/$VERSION/ubuntu
+
+ DIR=$(pwd)/bundles/$VERSION/ubuntu/build
+
+ # Generate an upstart config file (ubuntu-specific)
+ mkdir -p $DIR/etc/init
+ echo "$UPSTART_SCRIPT" > $DIR/etc/init/docker.conf
+
+ # Copy the binary
+ mkdir -p $DIR/usr/bin
+ cp bundles/$VERSION/binary/docker-$VERSION $DIR/usr/bin/docker
+
+ # Generate postinstall/prerm scripts
+ cat >/tmp/postinstall <<EOF
+#!/bin/sh
+/sbin/stop docker || true
+/sbin/start docker
+EOF
+ cat >/tmp/prerm <<EOF
+#!/bin/sh
+/sbin/stop docker || true
+EOF
+ chmod +x /tmp/postinstall /tmp/prerm
+
+ (
+ cd bundles/$VERSION/ubuntu
+ fpm -s dir -C $DIR \
+ --name lxc-docker-$VERSION --version $PKGVERSION \
+ --after-install /tmp/postinstall \
+ --before-remove /tmp/prerm \
+ --architecture "$PACKAGE_ARCHITECTURE" \
+ --prefix / \
+ --depends lxc --depends aufs-tools \
+ --description "$PACKAGE_DESCRIPTION" \
+ --maintainer "$PACKAGE_MAINTAINER" \
+ --conflicts lxc-docker-virtual-package \
+ --provides lxc-docker \
+ --provides lxc-docker-virtual-package \
+ --replaces lxc-docker \
+ --replaces lxc-docker-virtual-package \
+ --url "$PACKAGE_URL" \
+ --vendor "$PACKAGE_VENDOR" \
+ -t deb .
+ mkdir empty
+ fpm -s dir -C empty \
+ --name lxc-docker --version $PKGVERSION \
+ --architecture "$PACKAGE_ARCHITECTURE" \
+ --depends lxc-docker-$VERSION \
+ --description "$PACKAGE_DESCRIPTION" \
+ --maintainer "$PACKAGE_MAINTAINER" \
+ --url "$PACKAGE_URL" \
+ --vendor "$PACKAGE_VENDOR" \
+ -t deb .
+ )
+}
+
+
+# This helper function walks the current directory looking for directories
+# holding Go test files, and prints their paths on standard output, one per
+# line.
+find_test_dirs() {
+ find . -name '*_test.go' |
+ { while read f; do dirname $f; done; } |
+ sort -u
+}
+
+
+main() {
+ bundle_binary
+ bundle_ubuntu
+ #bundle_test
+ cat <<EOF
+###############################################################################
+Now run the resulting image, making sure that you set AWS_S3_BUCKET,
+AWS_ACCESS_KEY, and AWS_SECRET_KEY environment variables:
+
+docker run -e AWS_S3_BUCKET=get-staging.docker.io \\
+           -e AWS_ACCESS_KEY=AKI1234... \\
+           -e AWS_SECRET_KEY=sEs3mE... \\
+           -e GPG_PASSPHRASE=sesame... \\
+ image_id_or_name
+###############################################################################
+EOF
+}
+
+main
diff --git a/hack/release/release.sh b/hack/release/release.sh
new file mode 100755
index 0000000..4d5bd00
--- /dev/null
+++ b/hack/release/release.sh
@@ -0,0 +1,175 @@
+#!/bin/sh
+
+# This script looks for bundles built by make.sh, and releases them on a
+# public S3 bucket.
+#
+# Bundles should be available for the VERSION string passed as argument.
+#
+# The correct way to call this script is inside a container built by the
+# official Dockerfile at the root of the Docker source code. The Dockerfile,
+# make.sh and release.sh should all be from the same source code revision.
+
+set -e
+
+# Print a usage message and exit.
+usage() {
+ cat <<EOF
+To run, I need:
+- to be in a container generated by the Dockerfile at the top of the Docker
+ repository;
+- to be provided with the name of an S3 bucket, in environment variable
+ AWS_S3_BUCKET;
+- to be provided with AWS credentials for this S3 bucket, in environment
+ variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
+- the passphrase to unlock the GPG key which will sign the deb packages
+ (passed as environment variable GPG_PASSPHRASE);
+- a generous amount of good will and nice manners.
+The canonical way to run me is to run the image produced by the Dockerfile, e.g.:
+
+docker run -e AWS_S3_BUCKET=get-staging.docker.io \\
+           -e AWS_ACCESS_KEY=AKI1234... \\
+           -e AWS_SECRET_KEY=sEs4mE... \\
+           -e GPG_PASSPHRASE=m0resEs4mE... \\
+ f0058411
+EOF
+ exit 1
+}
+
+[ "$AWS_S3_BUCKET" ] || usage
+[ "$AWS_ACCESS_KEY" ] || usage
+[ "$AWS_SECRET_KEY" ] || usage
+[ "$GPG_PASSPHRASE" ] || usage
+[ -d /go/src/github.com/dotcloud/docker/ ] || usage
+cd /go/src/github.com/dotcloud/docker/
+
+VERSION=$(cat VERSION)
+BUCKET=$AWS_S3_BUCKET
+
+setup_s3() {
+ # Try creating the bucket. Ignore errors (it might already exist).
+ s3cmd mb s3://$BUCKET 2>/dev/null || true
+ # Check access to the bucket.
+ # s3cmd has no useful exit status, so we cannot check that.
+ # Instead, we check if it outputs anything on standard output.
+ # (When there are problems, it uses standard error instead.)
+ s3cmd info s3://$BUCKET | grep -q .
+ # Make the bucket accessible through website endpoints.
+ s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET
+}
+
+# write_to_s3 uploads the contents of standard input to the specified S3 url.
+write_to_s3() {
+ DEST=$1
+ F=`mktemp`
+ cat > $F
+ s3cmd --acl-public put $F $DEST
+ rm -f $F
+}
+
+s3_url() {
+ echo "http://$BUCKET.s3.amazonaws.com"
+}
+
+# Upload the 'ubuntu' bundle to S3:
+# 1. A full APT repository is published at $BUCKET/ubuntu/
+# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info
+release_ubuntu() {
+ # Make sure that we have our keys
+ mkdir -p /.gnupg/
+ s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
+ gpg --list-keys releasedocker >/dev/null || {
+ gpg --gen-key --batch <<EOF
+Key-Type: RSA
+Key-Length: 2048
+Passphrase: $GPG_PASSPHRASE
+Name-Real: Docker Release Tool
+Name-Email: docker@dotcloud.com
+Name-Comment: releasedocker
+Expire-Date: 0
+%commit
+EOF
+ }
+
+ # Sign our packages
+ dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
+ --sign builder bundles/$VERSION/ubuntu/*.deb
+
+ # Setup the APT repo
+ APTDIR=bundles/$VERSION/ubuntu/apt
+ mkdir -p $APTDIR/conf $APTDIR/db
+ s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true
+ cat > $APTDIR/conf/distributions <<EOF
+Codename: docker
+Components: main
+Architectures: amd64 i386
+EOF
+
+ # Add the DEB package to the APT repo
+ DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb
+ reprepro -b $APTDIR includedeb docker $DEBFILE
+
+ # Sign
+ for F in $(find $APTDIR -name Release)
+ do
+ gpg -u releasedocker --passphrase $GPG_PASSPHRASE \
+ --armor --sign --detach-sign \
+ --output $F.gpg $F
+ done
+
+ # Upload keys
+ s3cmd sync /.gnupg/ s3://$BUCKET/ubuntu/.gnupg/
+ gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg
+ s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg
+
+ # Upload repo
+ s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
+ cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/info
+# Add the repository to your APT sources
+echo deb $(s3_url $BUCKET)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
+# Then import the repository key
+curl $(s3_url $BUCKET)/gpg | apt-key add -
+# Install docker
+apt-get update ; apt-get install -y lxc-docker
+EOF
+ echo "APT repository uploaded. Instructions available at $(s3_url $BUCKET)/ubuntu/info"
+}
+
+# Upload a static binary to S3
+release_binary() {
+ [ -e bundles/$VERSION ]
+ S3DIR=s3://$BUCKET/builds/Linux/x86_64
+ s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION
+ cat <<EOF | write_to_s3 s3://$BUCKET/builds/info
+# To install, run the following command as root:
+curl -O http://$BUCKET.s3.amazonaws.com/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
+# Then start docker in daemon mode:
+sudo /usr/local/bin/docker -d
+EOF
+ if [ -z "$NOLATEST" ]; then
+ echo "Copying docker-$VERSION to docker-latest"
+ s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest
+ echo "Advertising $VERSION on $BUCKET as most recent version"
+ echo $VERSION | write_to_s3 s3://$BUCKET/latest
+ fi
+}
+
+# Upload the index script
+release_index() {
+ (
+ if [ "$BUCKET" != "get.docker.io" ]
+ then
+ sed s,https://get.docker.io/,http://$BUCKET.s3.amazonaws.com/, contrib/install.sh
+ else
+ cat contrib/install.sh
+ fi
+ ) | write_to_s3 s3://$BUCKET/index
+}
+
+main() {
+ setup_s3
+ release_binary
+ release_ubuntu
+ release_index
+}
+
+main
diff --git a/lxc_template.go b/lxc_template.go
index 2ed05ad..d357c02 100644
--- a/lxc_template.go
+++ b/lxc_template.go
@@ -121,7 +121,16 @@
{{end}}
`
+const LxcHostConfigTemplate = `
+{{if .LxcConf}}
+{{range $pair := .LxcConf}}
+{{$pair.Key}} = {{$pair.Value}}
+{{end}}
+{{end}}
+`
+
var LxcTemplateCompiled *template.Template
+var LxcHostConfigTemplateCompiled *template.Template
func getMemorySwap(config *Config) int64 {
// By default, MemorySwap is set to twice the size of RAM.
@@ -141,4 +150,8 @@
if err != nil {
panic(err)
}
+ LxcHostConfigTemplateCompiled, err = template.New("lxc-hostconfig").Funcs(funcMap).Parse(LxcHostConfigTemplate)
+ if err != nil {
+ panic(err)
+ }
}
diff --git a/packaging/README.md b/packaging/README.md
new file mode 100644
index 0000000..1efe79a
--- /dev/null
+++ b/packaging/README.md
@@ -0,0 +1,12 @@
+# Docker packaging
+
+This directory has one subdirectory per packaging distribution.
+At minimum, each of these subdirectories should contain a
+README.$DISTRIBUTION explaining how to create the native
+docker package and how to install it.
+
+**Important:** the debian and ubuntu directories are here for
+reference only. Since we experienced many issues with Launchpad,
+we gave up on using it to have a Docker PPA (at least, for now!)
+and we are using a simpler process.
+See [/hack/release](../hack/release) for details.
diff --git a/packaging/README.rst b/packaging/README.rst
deleted file mode 100644
index 7e927cc..0000000
--- a/packaging/README.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-Docker packaging
-================
-
-This directory has one subdirectory per packaging distribution.
-At minimum, each of these subdirectories should contain a
-README.$DISTRIBUTION explaining how to create the native
-docker package and how to install it.
-
diff --git a/registry/registry.go b/registry/registry.go
index ba62b34..759652f 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -162,7 +162,6 @@
// Check if an image exists in the Registry
func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool {
-
req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
if err != nil {
return false
@@ -230,7 +229,8 @@
}
for _, host := range registries {
endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
- req, err := r.opaqueRequest("GET", endpoint, nil)
+ req, err := r.reqFactory.NewRequest("GET", endpoint, nil)
+
if err != nil {
return nil, err
}
@@ -263,12 +263,11 @@
}
func (r *Registry) GetRepositoryData(indexEp, remote string) (*RepositoryData, error) {
-
repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote)
utils.Debugf("[registry] Calling GET %s", repositoryTarget)
- req, err := r.opaqueRequest("GET", repositoryTarget, nil)
+ req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil)
if err != nil {
return nil, err
}
@@ -426,22 +425,14 @@
return tarsumLayer.Sum(jsonRaw), nil
}
-func (r *Registry) opaqueRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
- req, err := r.reqFactory.NewRequest(method, urlStr, body)
- if err != nil {
- return nil, err
- }
- req.URL.Opaque = strings.Replace(urlStr, req.URL.Scheme+":", "", 1)
- return req, err
-}
-
// push a tag on the registry.
// Remote has the format '<user>/<repo>
func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
// "jsonify" the string
revision = "\"" + revision + "\""
+ path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
- req, err := r.opaqueRequest("PUT", registry+"repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
+ req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
if err != nil {
return err
}
@@ -480,11 +471,10 @@
if validate {
suffix = "images"
}
-
u := fmt.Sprintf("%srepositories/%s/%s", indexEp, remote, suffix)
utils.Debugf("[registry] PUT %s", u)
utils.Debugf("Image list pushed to index:\n%s\n", imgListJSON)
- req, err := r.opaqueRequest("PUT", u, bytes.NewReader(imgListJSON))
+ req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON))
if err != nil {
return nil, err
}
@@ -504,7 +494,7 @@
// Redirect if necessary
for res.StatusCode >= 300 && res.StatusCode < 400 {
utils.Debugf("Redirected to %s\n", res.Header.Get("Location"))
- req, err = r.opaqueRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
+ req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON))
if err != nil {
return nil, err
}
diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go
index e752315..6eb94b6 100644
--- a/registry/registry_mock_test.go
+++ b/registry/registry_mock_test.go
@@ -343,4 +343,4 @@
<-c
}
-//*/
\ No newline at end of file
+//*/
diff --git a/runtime.go b/runtime.go
index 0f97c01..7525680 100644
--- a/runtime.go
+++ b/runtime.go
@@ -15,9 +15,9 @@
)
type Capabilities struct {
- MemoryLimit bool
- SwapLimit bool
- IPv4Forwarding bool
+ MemoryLimit bool
+ SwapLimit bool
+ IPv4ForwardingDisabled bool
}
type Runtime struct {
@@ -244,8 +244,8 @@
}
content, err3 := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward")
- runtime.capabilities.IPv4Forwarding = err3 == nil && len(content) > 0 && content[0] == '1'
- if !runtime.capabilities.IPv4Forwarding && !quiet {
+ runtime.capabilities.IPv4ForwardingDisabled = err3 != nil || len(content) == 0 || content[0] != '1'
+ if runtime.capabilities.IPv4ForwardingDisabled && !quiet {
log.Printf("WARNING: IPv4 forwarding is disabled.")
}
}
diff --git a/server.go b/server.go
index f4e08dc..fc41424 100644
--- a/server.go
+++ b/server.go
@@ -241,6 +241,8 @@
outs = append(outs, out)
}
}
+
+ sortImagesByCreationAndTag(outs)
return outs, nil
}
@@ -269,7 +271,7 @@
Images: imgcount,
MemoryLimit: srv.runtime.capabilities.MemoryLimit,
SwapLimit: srv.runtime.capabilities.SwapLimit,
- IPv4Forwarding: srv.runtime.capabilities.IPv4Forwarding,
+ IPv4Forwarding: !srv.runtime.capabilities.IPv4ForwardingDisabled,
Debug: os.Getenv("DEBUG") != "",
NFd: utils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
@@ -417,19 +419,30 @@
if err != nil {
return err
}
-
+ out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling", "dependend layers"))
// FIXME: Try to stream the images?
// FIXME: Launch the getRemoteImage() in goroutines
+
for _, id := range history {
+
+ // ensure no two downloads of the same layer happen at the same time
+ if err := srv.poolAdd("pull", "layer:"+id); err != nil {
+ utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
+ return nil
+ }
+ defer srv.poolRemove("pull", "layer:"+id)
+
if !srv.runtime.graph.Exists(id) {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata"))
imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
if err != nil {
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
// FIXME: Keep going in case of error?
return err
}
img, err := NewImgJSON(imgJSON)
if err != nil {
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
return fmt.Errorf("Failed to parse json: %s", err)
}
@@ -437,13 +450,17 @@
out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "fs layer"))
layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
if err != nil {
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
return err
}
defer layer.Close()
if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf.FormatProgress(utils.TruncateID(id), "Downloading", "%8v/%v (%v)"), sf, false), img); err != nil {
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "downloading dependend layers"))
return err
}
}
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Download", "complete"))
+
}
return nil
}
@@ -491,29 +508,57 @@
downloadImage := func(img *registry.ImgData) {
if askedTag != "" && img.Tag != askedTag {
utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
- errors <- nil
+ if parallel {
+ errors <- nil
+ }
return
}
if img.Tag == "" {
utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
- errors <- nil
+ if parallel {
+ errors <- nil
+ }
return
}
+
+ // ensure no two downloads of the same image happen at the same time
+ if err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
+ utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
+ if parallel {
+ errors <- nil
+ }
+ return
+ }
+ defer srv.poolRemove("pull", "img:"+img.ID)
+
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s", img.Tag, localName)))
success := false
+ var lastErr error
for _, ep := range repoData.Endpoints {
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s", img.Tag, localName, ep)))
if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
- out.Write(sf.FormatStatus(utils.TruncateID(img.ID), "Error while retrieving image for tag: %s (%s); checking next endpoint", askedTag, err))
+			// It's not ideal that only the last error is returned; it would be better to concatenate the errors.
+			// Since the error is also written to the output stream, the user will see it.
+ lastErr = err
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err)))
continue
}
success = true
break
}
if !success {
- errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, %s", img.Tag, localName, lastErr)))
+ if parallel {
+ errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
+ return
+ }
}
- errors <- nil
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download", "complete"))
+
+ if parallel {
+ errors <- nil
+ }
}
if parallel {
@@ -522,15 +567,18 @@
downloadImage(image)
}
}
-
if parallel {
+ var lastError error
for i := 0; i < len(repoData.ImgList); i++ {
if err := <-errors; err != nil {
- return err
+ lastError = err
}
}
- }
+ if lastError != nil {
+ return lastError
+ }
+ }
for tag, id := range tagsList {
if askedTag != "" && tag != askedTag {
continue
@@ -828,7 +876,13 @@
container, err := b.Create(config)
if err != nil {
if srv.runtime.graph.IsNotExist(err) {
- return "", fmt.Errorf("No such image: %s", config.Image)
+
+ _, tag := utils.ParseRepositoryTag(config.Image)
+ if tag == "" {
+ tag = DEFAULTTAG
+ }
+
+ return "", fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag)
}
return "", err
}
diff --git a/server_test.go b/server_test.go
index bcf8009..95ebcf2 100644
--- a/server_test.go
+++ b/server_test.go
@@ -206,6 +206,7 @@
}
func TestContainerTop(t *testing.T) {
+ t.Skip("Fixme. Skipping test for now. Reported error: 'server_test.go:236: Expected 2 processes, found 1.'")
runtime := mkRuntime(t)
srv := &Server{runtime: runtime}
defer nuke(runtime)
diff --git a/sorter.go b/sorter.go
new file mode 100644
index 0000000..a818841
--- /dev/null
+++ b/sorter.go
@@ -0,0 +1,36 @@
+package docker
+
+import "sort"
+
+type imageSorter struct {
+ images []APIImages
+ by func(i1, i2 *APIImages) bool // Closure used in the Less method.
+}
+
+// Len is part of sort.Interface.
+func (s *imageSorter) Len() int {
+ return len(s.images)
+}
+
+// Swap is part of sort.Interface.
+func (s *imageSorter) Swap(i, j int) {
+ s.images[i], s.images[j] = s.images[j], s.images[i]
+}
+
+// Less is part of sort.Interface. It is implemented by calling the "by" closure in the sorter.
+func (s *imageSorter) Less(i, j int) bool {
+ return s.by(&s.images[i], &s.images[j])
+}
+
+// Sort []APIImages by most recent creation date and tag name.
+func sortImagesByCreationAndTag(images []APIImages) {
+ creationAndTag := func(i1, i2 *APIImages) bool {
+ return i1.Created > i2.Created || (i1.Created == i2.Created && i2.Tag > i1.Tag)
+ }
+
+ sorter := &imageSorter{
+ images: images,
+ by: creationAndTag}
+
+ sort.Sort(sorter)
+}
diff --git a/sorter_test.go b/sorter_test.go
new file mode 100644
index 0000000..5519708
--- /dev/null
+++ b/sorter_test.go
@@ -0,0 +1,57 @@
+package docker
+
+import (
+ "testing"
+)
+
+func TestServerListOrderedImagesByCreationDate(t *testing.T) {
+ runtime := mkRuntime(t)
+ defer nuke(runtime)
+
+ archive, err := fakeTar()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = runtime.graph.Create(archive, nil, "Testing", "", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ srv := &Server{runtime: runtime}
+
+ images, err := srv.Images(true, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if images[0].Created < images[1].Created {
+ t.Error("Expected []APIImges to be ordered by most recent creation date.")
+ }
+}
+
+func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
+ runtime := mkRuntime(t)
+ defer nuke(runtime)
+
+ archive, err := fakeTar()
+ if err != nil {
+ t.Fatal(err)
+ }
+ image, err := runtime.graph.Create(archive, nil, "Testing", "", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ srv := &Server{runtime: runtime}
+ srv.ContainerTag(image.ID, "repo", "foo", false)
+ srv.ContainerTag(image.ID, "repo", "bar", false)
+
+ images, err := srv.Images(true, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if images[0].Created != images[1].Created || images[0].Tag >= images[1].Tag {
+ t.Error("Expected []APIImges to be ordered by most recent creation date and tag name.")
+ }
+}
diff --git a/sysinit.go b/sysinit.go
index fb36cd2..aa5d2b2 100644
--- a/sysinit.go
+++ b/sysinit.go
@@ -22,6 +22,15 @@
}
}
+// Set up the working directory
+func setupWorkingDirectory(workdir string) {
+ if workdir == "" {
+ return
+ }
+ syscall.Chdir(workdir)
+}
+
+
// Takes care of dropping privileges to the desired user
func changeUser(u string) {
if u == "" {
@@ -83,6 +92,7 @@
}
var u = flag.String("u", "", "username or uid")
var gw = flag.String("g", "", "gateway address")
+ var workdir = flag.String("w", "", "workdir")
var flEnv ListOpts
flag.Var(&flEnv, "e", "Set environment variables")
@@ -91,6 +101,7 @@
cleanupEnv(flEnv)
setupNetworking(*gw)
+ setupWorkingDirectory(*workdir)
changeUser(*u)
executeProgram(flag.Arg(0), flag.Args())
}
diff --git a/utils.go b/utils.go
index 2264cae..aed8ffd 100644
--- a/utils.go
+++ b/utils.go
@@ -1,6 +1,7 @@
package docker
import (
+ "fmt"
"strings"
)
@@ -132,6 +133,9 @@
if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 {
userConf.Entrypoint = imageConf.Entrypoint
}
+ if userConf.WorkingDir == "" {
+ userConf.WorkingDir = imageConf.WorkingDir
+ }
if userConf.VolumesFrom == "" {
userConf.VolumesFrom = imageConf.VolumesFrom
}
@@ -143,3 +147,23 @@
}
}
}
+
+func parseLxcConfOpts(opts ListOpts) ([]KeyValuePair, error) {
+ out := make([]KeyValuePair, len(opts))
+ for i, o := range opts {
+ k, v, err := parseLxcOpt(o)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = KeyValuePair{Key: k, Value: v}
+ }
+ return out, nil
+}
+
+func parseLxcOpt(opt string) (string, string, error) {
+ parts := strings.SplitN(opt, "=", 2)
+ if len(parts) != 2 {
+ return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt)
+ }
+ return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+}
diff --git a/utils/utils.go b/utils/utils.go
index 497d7f4..6a5beb8 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -645,6 +645,7 @@
}
return jm.Error
}
+ fmt.Fprintf(out, "%c[2K\r", 27)
if jm.Time != 0 {
fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0))
}
@@ -655,7 +656,6 @@
fmt.Fprintf(out, "(from %s) ", jm.From)
}
if jm.Progress != "" {
- fmt.Fprintf(out, "%c[2K", 27)
fmt.Fprintf(out, "%s %s\r", jm.Status, jm.Progress)
} else {
fmt.Fprintf(out, "%s\r\n", jm.Status)
diff --git a/utils_test.go b/utils_test.go
index 5c37e9e..e8aae17 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -301,3 +301,20 @@
t.Fail()
}
}
+
+func TestParseLxcConfOpt(t *testing.T) {
+ opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
+
+ for _, o := range opts {
+ k, v, err := parseLxcOpt(o)
+ if err != nil {
+ t.FailNow()
+ }
+ if k != "lxc.utsname" {
+ t.Fail()
+ }
+ if v != "docker" {
+ t.Fail()
+ }
+ }
+}