Merge pull request #32521 from mkumatag/update_shellwords

Vendor package update github.com/mattn/go-shellwords
diff --git a/api/server/router/swarm/backend.go b/api/server/router/swarm/backend.go
index 28b9a98..3a5da97 100644
--- a/api/server/router/swarm/backend.go
+++ b/api/server/router/swarm/backend.go
@@ -17,7 +17,7 @@
 	GetUnlockKey() (string, error)
 	UnlockSwarm(req types.UnlockRequest) error
 	GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
-	GetService(string) (types.Service, error)
+	GetService(idOrName string, insertDefaults bool) (types.Service, error)
 	CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error)
 	UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions) (*basictypes.ServiceUpdateResponse, error)
 	RemoveService(string) error
@@ -30,7 +30,7 @@
 	GetTask(string) (types.Task, error)
 	GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error)
 	CreateSecret(s types.SecretSpec) (string, error)
-	RemoveSecret(id string) error
+	RemoveSecret(idOrName string) error
 	GetSecret(id string) (types.Secret, error)
-	UpdateSecret(id string, version uint64, spec types.SecretSpec) error
+	UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error
 }
diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go
index dfae13f..4c60b6b 100644
--- a/api/server/router/swarm/cluster_routes.go
+++ b/api/server/router/swarm/cluster_routes.go
@@ -151,7 +151,17 @@
 }
 
 func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	service, err := sr.backend.GetService(vars["id"])
+	var insertDefaults bool
+	if value := r.URL.Query().Get("insertDefaults"); value != "" {
+		var err error
+		insertDefaults, err = strconv.ParseBool(value)
+		if err != nil {
+			err := fmt.Errorf("invalid value for insertDefaults: %s", value)
+			return errors.NewBadRequestError(err)
+		}
+	}
+
+	service, err := sr.backend.GetService(vars["id"], insertDefaults)
 	if err != nil {
 		logrus.Errorf("Error getting service %s: %v", vars["id"], err)
 		return err
diff --git a/api/server/router/swarm/helpers.go b/api/server/router/swarm/helpers.go
index af745b8..ea692ea 100644
--- a/api/server/router/swarm/helpers.go
+++ b/api/server/router/swarm/helpers.go
@@ -39,7 +39,7 @@
 	// checking for whether logs are TTY involves iterating over every service
 	// and task. idk if there is a better way
 	for _, service := range selector.Services {
-		s, err := sr.backend.GetService(service)
+		s, err := sr.backend.GetService(service, false)
 		if err != nil {
 			// maybe should return some context with this error?
 			return err
diff --git a/api/swagger.yaml b/api/swagger.yaml
index f6e6fc6..36927b5 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -7584,6 +7584,11 @@
           description: "ID or name of service."
           required: true
           type: "string"
+        - name: "insertDefaults"
+          in: "query"
+          description: "Fill empty fields with default values."
+          type: "boolean"
+          default: false
       tags: ["Service"]
     delete:
       summary: "Delete a service"
diff --git a/api/types/client.go b/api/types/client.go
index 2bf6ad0..d7bc550 100644
--- a/api/types/client.go
+++ b/api/types/client.go
@@ -316,12 +316,18 @@
 	Rollback string
 }
 
-// ServiceListOptions holds parameters to list  services with.
+// ServiceListOptions holds parameters to list services with.
 type ServiceListOptions struct {
 	Filters filters.Args
 }
 
-// TaskListOptions holds parameters to list  tasks with.
+// ServiceInspectOptions holds parameters related to the "service inspect"
+// operation.
+type ServiceInspectOptions struct {
+	InsertDefaults bool
+}
+
+// TaskListOptions holds parameters to list tasks with.
 type TaskListOptions struct {
 	Filters filters.Args
 }
diff --git a/cli/command/idresolver/client_test.go b/cli/command/idresolver/client_test.go
index 8c02d7e..f84683b 100644
--- a/cli/command/idresolver/client_test.go
+++ b/cli/command/idresolver/client_test.go
@@ -1,6 +1,7 @@
 package idresolver
 
 import (
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"golang.org/x/net/context"
@@ -19,7 +20,7 @@
 	return swarm.Node{}, []byte{}, nil
 }
 
-func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) {
+func (cli *fakeClient) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) {
 	if cli.serviceInspectFunc != nil {
 		return cli.serviceInspectFunc(serviceID)
 	}
diff --git a/cli/command/idresolver/idresolver.go b/cli/command/idresolver/idresolver.go
index 25c51a2..6088b64 100644
--- a/cli/command/idresolver/idresolver.go
+++ b/cli/command/idresolver/idresolver.go
@@ -3,6 +3,7 @@
 import (
 	"golang.org/x/net/context"
 
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"github.com/pkg/errors"
@@ -39,7 +40,7 @@
 		}
 		return id, nil
 	case swarm.Service:
-		service, _, err := r.client.ServiceInspectWithRaw(ctx, id)
+		service, _, err := r.client.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{})
 		if err != nil {
 			return id, nil
 		}
diff --git a/cli/command/service/create.go b/cli/command/service/create.go
index 0e77f73..bb2a1fe 100644
--- a/cli/command/service/create.go
+++ b/cli/command/service/create.go
@@ -30,7 +30,7 @@
 	flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)")
 	flags.StringVar(&opts.name, flagName, "", "Service name")
 
-	addServiceFlags(flags, opts)
+	addServiceFlags(flags, opts, buildServiceDefaultFlagMapping())
 
 	flags.VarP(&opts.labels, flagLabel, "l", "Service labels")
 	flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels")
@@ -65,7 +65,7 @@
 
 	ctx := context.Background()
 
-	service, err := opts.ToService(ctx, apiClient)
+	service, err := opts.ToService(ctx, apiClient, flags)
 	if err != nil {
 		return err
 	}
diff --git a/cli/command/service/inspect.go b/cli/command/service/inspect.go
index 8a8b51c..fae24ee 100644
--- a/cli/command/service/inspect.go
+++ b/cli/command/service/inspect.go
@@ -5,6 +5,7 @@
 
 	"golang.org/x/net/context"
 
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command/formatter"
@@ -51,7 +52,8 @@
 	}
 
 	getRef := func(ref string) (interface{}, []byte, error) {
-		service, _, err := client.ServiceInspectWithRaw(ctx, ref)
+		// Service inspect shows default values in empty fields.
+		service, _, err := client.ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true})
 		if err == nil || !apiclient.IsErrServiceNotFound(err) {
 			return service, nil, err
 		}
diff --git a/cli/command/service/logs.go b/cli/command/service/logs.go
index 30ed504..2440c16 100644
--- a/cli/command/service/logs.go
+++ b/cli/command/service/logs.go
@@ -86,7 +86,7 @@
 		tty          bool
 	)
 
-	service, _, err := cli.ServiceInspectWithRaw(ctx, opts.target)
+	service, _, err := cli.ServiceInspectWithRaw(ctx, opts.target, types.ServiceInspectOptions{})
 	if err != nil {
 		// if it's any error other than service not found, it's Real
 		if !client.IsErrServiceNotFound(err) {
diff --git a/cli/command/service/opts.go b/cli/command/service/opts.go
index 0664368..4211c5b 100644
--- a/cli/command/service/opts.go
+++ b/cli/command/service/opts.go
@@ -12,7 +12,10 @@
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/opts"
 	runconfigopts "github.com/docker/docker/runconfig/opts"
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/api/defaults"
 	shlex "github.com/flynn-archive/go-shlex"
+	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 	"github.com/spf13/pflag"
 	"golang.org/x/net/context"
@@ -177,6 +180,9 @@
 }
 
 func (s *ShlexOpt) String() string {
+	if len(*s) == 0 {
+		return ""
+	}
 	return fmt.Sprint(*s)
 }
 
@@ -194,17 +200,77 @@
 	order           string
 }
 
-func (opts updateOptions) config() *swarm.UpdateConfig {
+func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.UpdateConfig {
+	defaultFailureAction := strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaultUpdateConfig.FailureAction)])
+	defaultMonitor, _ := gogotypes.DurationFromProto(defaultUpdateConfig.Monitor)
 	return &swarm.UpdateConfig{
-		Parallelism:     opts.parallelism,
-		Delay:           opts.delay,
-		Monitor:         opts.monitor,
-		FailureAction:   opts.onFailure,
-		MaxFailureRatio: opts.maxFailureRatio.Value(),
-		Order:           opts.order,
+		Parallelism:     defaultUpdateConfig.Parallelism,
+		Delay:           defaultUpdateConfig.Delay,
+		Monitor:         defaultMonitor,
+		FailureAction:   defaultFailureAction,
+		MaxFailureRatio: defaultUpdateConfig.MaxFailureRatio,
+		Order:           defaultOrder(defaultUpdateConfig.Order),
 	}
 }
 
+func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
+	if !anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) {
+		return nil
+	}
+
+	updateConfig := updateConfigFromDefaults(defaults.Service.Update)
+
+	if flags.Changed(flagUpdateParallelism) {
+		updateConfig.Parallelism = opts.parallelism
+	}
+	if flags.Changed(flagUpdateDelay) {
+		updateConfig.Delay = opts.delay
+	}
+	if flags.Changed(flagUpdateMonitor) {
+		updateConfig.Monitor = opts.monitor
+	}
+	if flags.Changed(flagUpdateFailureAction) {
+		updateConfig.FailureAction = opts.onFailure
+	}
+	if flags.Changed(flagUpdateMaxFailureRatio) {
+		updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value()
+	}
+	if flags.Changed(flagUpdateOrder) {
+		updateConfig.Order = opts.order
+	}
+
+	return updateConfig
+}
+
+func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
+	if !anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio) {
+		return nil
+	}
+
+	updateConfig := updateConfigFromDefaults(defaults.Service.Rollback)
+
+	if flags.Changed(flagRollbackParallelism) {
+		updateConfig.Parallelism = opts.parallelism
+	}
+	if flags.Changed(flagRollbackDelay) {
+		updateConfig.Delay = opts.delay
+	}
+	if flags.Changed(flagRollbackMonitor) {
+		updateConfig.Monitor = opts.monitor
+	}
+	if flags.Changed(flagRollbackFailureAction) {
+		updateConfig.FailureAction = opts.onFailure
+	}
+	if flags.Changed(flagRollbackMaxFailureRatio) {
+		updateConfig.MaxFailureRatio = opts.maxFailureRatio.Value()
+	}
+	if flags.Changed(flagRollbackOrder) {
+		updateConfig.Order = opts.order
+	}
+
+	return updateConfig
+}
+
 type resourceOptions struct {
 	limitCPU      opts.NanoCPUs
 	limitMemBytes opts.MemBytes
@@ -232,13 +298,70 @@
 	window      DurationOpt
 }
 
-func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy {
-	return &swarm.RestartPolicy{
-		Condition:   swarm.RestartPolicyCondition(r.condition),
-		Delay:       r.delay.Value(),
-		MaxAttempts: r.maxAttempts.Value(),
-		Window:      r.window.Value(),
+func defaultRestartPolicy() *swarm.RestartPolicy {
+	defaultMaxAttempts := defaults.Service.Task.Restart.MaxAttempts
+	rp := &swarm.RestartPolicy{
+		MaxAttempts: &defaultMaxAttempts,
 	}
+
+	if defaults.Service.Task.Restart.Delay != nil {
+		defaultRestartDelay, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay)
+		rp.Delay = &defaultRestartDelay
+	}
+	if defaults.Service.Task.Restart.Window != nil {
+		defaultRestartWindow, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Window)
+		rp.Window = &defaultRestartWindow
+	}
+	rp.Condition = defaultRestartCondition()
+
+	return rp
+}
+
+func defaultRestartCondition() swarm.RestartPolicyCondition {
+	switch defaults.Service.Task.Restart.Condition {
+	case api.RestartOnNone:
+		return "none"
+	case api.RestartOnFailure:
+		return "on-failure"
+	case api.RestartOnAny:
+		return "any"
+	default:
+		return ""
+	}
+}
+
+func defaultOrder(order api.UpdateConfig_UpdateOrder) string {
+	switch order {
+	case api.UpdateConfig_STOP_FIRST:
+		return "stop-first"
+	case api.UpdateConfig_START_FIRST:
+		return "start-first"
+	default:
+		return ""
+	}
+}
+
+func (r *restartPolicyOptions) ToRestartPolicy(flags *pflag.FlagSet) *swarm.RestartPolicy {
+	if !anyChanged(flags, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow, flagRestartCondition) {
+		return nil
+	}
+
+	restartPolicy := defaultRestartPolicy()
+
+	if flags.Changed(flagRestartDelay) {
+		restartPolicy.Delay = r.delay.Value()
+	}
+	if flags.Changed(flagRestartCondition) {
+		restartPolicy.Condition = swarm.RestartPolicyCondition(r.condition)
+	}
+	if flags.Changed(flagRestartMaxAttempts) {
+		restartPolicy.MaxAttempts = r.maxAttempts.Value()
+	}
+	if flags.Changed(flagRestartWindow) {
+		restartPolicy.Window = r.window.Value()
+	}
+
+	return restartPolicy
 }
 
 type credentialSpecOpt struct {
@@ -463,7 +586,14 @@
 	return serviceMode, nil
 }
 
-func (opts *serviceOptions) ToService(ctx context.Context, apiClient client.APIClient) (swarm.ServiceSpec, error) {
+func (opts *serviceOptions) ToStopGracePeriod(flags *pflag.FlagSet) *time.Duration {
+	if flags.Changed(flagStopGracePeriod) {
+		return opts.stopGrace.Value()
+	}
+	return nil
+}
+
+func (opts *serviceOptions) ToService(ctx context.Context, apiClient client.APIClient, flags *pflag.FlagSet) (swarm.ServiceSpec, error) {
 	var service swarm.ServiceSpec
 
 	envVariables, err := runconfigopts.ReadKVStrings(opts.envFile.GetAll(), opts.env.GetAll())
@@ -526,13 +656,13 @@
 					Options:     opts.dnsOption.GetAll(),
 				},
 				Hosts:           convertExtraHostsToSwarmHosts(opts.hosts.GetAll()),
-				StopGracePeriod: opts.stopGrace.Value(),
+				StopGracePeriod: opts.ToStopGracePeriod(flags),
 				Secrets:         nil,
 				Healthcheck:     healthConfig,
 			},
 			Networks:      networks,
 			Resources:     opts.resources.ToResourceRequirements(),
-			RestartPolicy: opts.restartPolicy.ToRestartPolicy(),
+			RestartPolicy: opts.restartPolicy.ToRestartPolicy(flags),
 			Placement: &swarm.Placement{
 				Constraints: opts.constraints.GetAll(),
 				Preferences: opts.placementPrefs.prefs,
@@ -540,8 +670,8 @@
 			LogDriver: opts.logDriver.toLogDriver(),
 		},
 		Mode:           serviceMode,
-		UpdateConfig:   opts.update.config(),
-		RollbackConfig: opts.rollback.config(),
+		UpdateConfig:   opts.update.updateConfig(flags),
+		RollbackConfig: opts.update.rollbackConfig(flags),
 		EndpointSpec:   opts.endpoint.ToEndpointSpec(),
 	}
 
@@ -554,9 +684,67 @@
 	return service, nil
 }
 
+type flagDefaults map[string]interface{}
+
+func (fd flagDefaults) getUint64(flagName string) uint64 {
+	if val, ok := fd[flagName].(uint64); ok {
+		return val
+	}
+	return 0
+}
+
+func (fd flagDefaults) getString(flagName string) string {
+	if val, ok := fd[flagName].(string); ok {
+		return val
+	}
+	return ""
+}
+
+func buildServiceDefaultFlagMapping() flagDefaults {
+	defaultFlagValues := make(map[string]interface{})
+
+	defaultFlagValues[flagStopGracePeriod], _ = gogotypes.DurationFromProto(defaults.Service.Task.GetContainer().StopGracePeriod)
+	defaultFlagValues[flagRestartCondition] = `"` + defaultRestartCondition() + `"`
+	defaultFlagValues[flagRestartDelay], _ = gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay)
+
+	if defaults.Service.Task.Restart.MaxAttempts != 0 {
+		defaultFlagValues[flagRestartMaxAttempts] = defaults.Service.Task.Restart.MaxAttempts
+	}
+
+	defaultRestartWindow, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Window)
+	if defaultRestartWindow != 0 {
+		defaultFlagValues[flagRestartWindow] = defaultRestartWindow
+	}
+
+	defaultFlagValues[flagUpdateParallelism] = defaults.Service.Update.Parallelism
+	defaultFlagValues[flagUpdateDelay] = defaults.Service.Update.Delay
+	defaultFlagValues[flagUpdateMonitor], _ = gogotypes.DurationFromProto(defaults.Service.Update.Monitor)
+	defaultFlagValues[flagUpdateFailureAction] = `"` + strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaults.Service.Update.FailureAction)]) + `"`
+	defaultFlagValues[flagUpdateMaxFailureRatio] = defaults.Service.Update.MaxFailureRatio
+	defaultFlagValues[flagUpdateOrder] = `"` + defaultOrder(defaults.Service.Update.Order) + `"`
+
+	defaultFlagValues[flagRollbackParallelism] = defaults.Service.Rollback.Parallelism
+	defaultFlagValues[flagRollbackDelay] = defaults.Service.Rollback.Delay
+	defaultFlagValues[flagRollbackMonitor], _ = gogotypes.DurationFromProto(defaults.Service.Rollback.Monitor)
+	defaultFlagValues[flagRollbackFailureAction] = `"` + strings.ToLower(api.UpdateConfig_FailureAction_name[int32(defaults.Service.Rollback.FailureAction)]) + `"`
+	defaultFlagValues[flagRollbackMaxFailureRatio] = defaults.Service.Rollback.MaxFailureRatio
+	defaultFlagValues[flagRollbackOrder] = `"` + defaultOrder(defaults.Service.Rollback.Order) + `"`
+
+	defaultFlagValues[flagEndpointMode] = "vip"
+
+	return defaultFlagValues
+}
+
 // addServiceFlags adds all flags that are common to both `create` and `update`.
 // Any flags that are not common are added separately in the individual command
-func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions) {
+func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions, defaultFlagValues flagDefaults) {
+	flagDesc := func(flagName string, desc string) string {
+		if defaultValue, ok := defaultFlagValues[flagName]; ok {
+			return fmt.Sprintf("%s (default %v)", desc, defaultValue)
+		}
+		return desc
+	}
+
 	flags.BoolVarP(&opts.detach, "detach", "d", true, "Exit immediately instead of waiting for the service to converge")
 	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress progress output")
 
@@ -572,39 +760,40 @@
 	flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory")
 	flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs")
 	flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory")
-	flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)")
 
+	flags.Var(&opts.stopGrace, flagStopGracePeriod, flagDesc(flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)"))
 	flags.Var(&opts.replicas, flagReplicas, "Number of tasks")
 
-	flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", `Restart when condition is met ("none"|"on-failure"|"any")`)
-	flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)")
-	flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up")
-	flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)")
+	flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", flagDesc(flagRestartCondition, `Restart when condition is met ("none"|"on-failure"|"any")`))
+	flags.Var(&opts.restartPolicy.delay, flagRestartDelay, flagDesc(flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)"))
+	flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, flagDesc(flagRestartMaxAttempts, "Maximum number of restarts before giving up"))
 
-	flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, 1, "Maximum number of tasks updated simultaneously (0 to update all at once)")
-	flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates (ns|us|ms|s|m|h) (default 0s)")
-	flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, time.Duration(0), "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)")
+	flags.Var(&opts.restartPolicy.window, flagRestartWindow, flagDesc(flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)"))
+
+	flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, defaultFlagValues.getUint64(flagUpdateParallelism), "Maximum number of tasks updated simultaneously (0 to update all at once)")
+	flags.DurationVar(&opts.update.delay, flagUpdateDelay, 0, flagDesc(flagUpdateDelay, "Delay between updates (ns|us|ms|s|m|h)"))
+	flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, 0, flagDesc(flagUpdateMonitor, "Duration after each task update to monitor for failure (ns|us|ms|s|m|h)"))
 	flags.SetAnnotation(flagUpdateMonitor, "version", []string{"1.25"})
-	flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "pause", `Action on update failure ("pause"|"continue"|"rollback")`)
-	flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update")
+	flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "", flagDesc(flagUpdateFailureAction, `Action on update failure ("pause"|"continue"|"rollback")`))
+	flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, flagDesc(flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update"))
 	flags.SetAnnotation(flagUpdateMaxFailureRatio, "version", []string{"1.25"})
-	flags.StringVar(&opts.update.order, flagUpdateOrder, "stop-first", `Update order ("start-first"|"stop-first")`)
+	flags.StringVar(&opts.update.order, flagUpdateOrder, "", flagDesc(flagUpdateOrder, `Update order ("start-first"|"stop-first")`))
 	flags.SetAnnotation(flagUpdateOrder, "version", []string{"1.29"})
 
-	flags.Uint64Var(&opts.rollback.parallelism, flagRollbackParallelism, 1, "Maximum number of tasks rolled back simultaneously (0 to roll back all at once)")
+	flags.Uint64Var(&opts.rollback.parallelism, flagRollbackParallelism, defaultFlagValues.getUint64(flagRollbackParallelism), "Maximum number of tasks rolled back simultaneously (0 to roll back all at once)")
 	flags.SetAnnotation(flagRollbackParallelism, "version", []string{"1.28"})
-	flags.DurationVar(&opts.rollback.delay, flagRollbackDelay, time.Duration(0), "Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s)")
+	flags.DurationVar(&opts.rollback.delay, flagRollbackDelay, 0, flagDesc(flagRollbackDelay, "Delay between task rollbacks (ns|us|ms|s|m|h)"))
 	flags.SetAnnotation(flagRollbackDelay, "version", []string{"1.28"})
-	flags.DurationVar(&opts.rollback.monitor, flagRollbackMonitor, time.Duration(0), "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 0s)")
+	flags.DurationVar(&opts.rollback.monitor, flagRollbackMonitor, 0, flagDesc(flagRollbackMonitor, "Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)"))
 	flags.SetAnnotation(flagRollbackMonitor, "version", []string{"1.28"})
-	flags.StringVar(&opts.rollback.onFailure, flagRollbackFailureAction, "pause", `Action on rollback failure ("pause"|"continue")`)
+	flags.StringVar(&opts.rollback.onFailure, flagRollbackFailureAction, "", flagDesc(flagRollbackFailureAction, `Action on rollback failure ("pause"|"continue")`))
 	flags.SetAnnotation(flagRollbackFailureAction, "version", []string{"1.28"})
-	flags.Var(&opts.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback")
+	flags.Var(&opts.rollback.maxFailureRatio, flagRollbackMaxFailureRatio, flagDesc(flagRollbackMaxFailureRatio, "Failure rate to tolerate during a rollback"))
 	flags.SetAnnotation(flagRollbackMaxFailureRatio, "version", []string{"1.28"})
-	flags.StringVar(&opts.rollback.order, flagRollbackOrder, "stop-first", `Rollback order ("start-first"|"stop-first")`)
+	flags.StringVar(&opts.rollback.order, flagRollbackOrder, "", flagDesc(flagRollbackOrder, `Rollback order ("start-first"|"stop-first")`))
 	flags.SetAnnotation(flagRollbackOrder, "version", []string{"1.29"})
 
-	flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "vip", "Endpoint mode (vip or dnsrr)")
+	flags.StringVar(&opts.endpoint.mode, flagEndpointMode, defaultFlagValues.getString(flagEndpointMode), "Endpoint mode (vip or dnsrr)")
 
 	flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents")
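With these changes, `service create` seeds update, rollback, and restart settings from swarmkit's `defaults.Service` and overrides only the fields whose flags were explicitly set, while `buildServiceDefaultFlagMapping` supplies the "(default ...)" text shown in the flag help. A simplified sketch of the "start from defaults, override only what changed" pattern with `pflag`, using a stand-in `Config` type rather than the real swarm types:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

// Config stands in for swarm.UpdateConfig; the seed values below are
// placeholders for the values taken from defaults.Service in the real code.
type Config struct {
	Parallelism uint64
	Delay       time.Duration
}

func buildConfig(flags *pflag.FlagSet, parallelism uint64, delay time.Duration) *Config {
	if !flags.Changed("update-parallelism") && !flags.Changed("update-delay") {
		return nil // nothing set: leave the field nil and let the daemon decide
	}
	cfg := &Config{Parallelism: 1, Delay: 0} // seeded from the defaults package
	if flags.Changed("update-parallelism") {
		cfg.Parallelism = parallelism
	}
	if flags.Changed("update-delay") {
		cfg.Delay = delay
	}
	return cfg
}

func main() {
	flags := pflag.NewFlagSet("create", pflag.ContinueOnError)
	parallelism := flags.Uint64("update-parallelism", 1, "")
	delay := flags.Duration("update-delay", 0, "")
	_ = flags.Parse([]string{"--update-delay=5s"})
	fmt.Printf("%+v\n", buildConfig(flags, *parallelism, *delay)) // &{Parallelism:1 Delay:5s}
}
```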
 
diff --git a/cli/command/service/progress/progress.go b/cli/command/service/progress/progress.go
index ccc7e60..bfeaa31 100644
--- a/cli/command/service/progress/progress.go
+++ b/cli/command/service/progress/progress.go
@@ -85,7 +85,7 @@
 	)
 
 	for {
-		service, _, err := client.ServiceInspectWithRaw(ctx, serviceID)
+		service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
 		if err != nil {
 			return err
 		}
diff --git a/cli/command/service/scale.go b/cli/command/service/scale.go
index ed76c86..98163c8 100644
--- a/cli/command/service/scale.go
+++ b/cli/command/service/scale.go
@@ -71,7 +71,7 @@
 	client := dockerCli.Client()
 	ctx := context.Background()
 
-	service, _, err := client.ServiceInspectWithRaw(ctx, serviceID)
+	service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
 	if err != nil {
 		return err
 	}
diff --git a/cli/command/service/update.go b/cli/command/service/update.go
index b59f163..233da68 100644
--- a/cli/command/service/update.go
+++ b/cli/command/service/update.go
@@ -17,6 +17,7 @@
 	"github.com/docker/docker/opts"
 	runconfigopts "github.com/docker/docker/runconfig/opts"
 	"github.com/docker/go-connections/nat"
+	"github.com/docker/swarmkit/api/defaults"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
@@ -42,7 +43,7 @@
 	flags.SetAnnotation("rollback", "version", []string{"1.25"})
 	flags.Bool("force", false, "Force update even if no changes require it")
 	flags.SetAnnotation("force", "version", []string{"1.25"})
-	addServiceFlags(flags, serviceOpts)
+	addServiceFlags(flags, serviceOpts, nil)
 
 	flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable")
 	flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container")
@@ -101,7 +102,7 @@
 	apiClient := dockerCli.Client()
 	ctx := context.Background()
 
-	service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID)
+	service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
 	if err != nil {
 		return err
 	}
@@ -294,9 +295,8 @@
 
 	if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) {
 		if task.RestartPolicy == nil {
-			task.RestartPolicy = &swarm.RestartPolicy{}
+			task.RestartPolicy = defaultRestartPolicy()
 		}
-
 		if flags.Changed(flagRestartCondition) {
 			value, _ := flags.GetString(flagRestartCondition)
 			task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value)
@@ -332,7 +332,7 @@
 
 	if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) {
 		if spec.UpdateConfig == nil {
-			spec.UpdateConfig = &swarm.UpdateConfig{}
+			spec.UpdateConfig = updateConfigFromDefaults(defaults.Service.Update)
 		}
 		updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism)
 		updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay)
@@ -344,7 +344,7 @@
 
 	if anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) {
 		if spec.RollbackConfig == nil {
-			spec.RollbackConfig = &swarm.UpdateConfig{}
+			spec.RollbackConfig = updateConfigFromDefaults(defaults.Service.Rollback)
 		}
 		updateUint64(flagRollbackParallelism, &spec.RollbackConfig.Parallelism)
 		updateDuration(flagRollbackDelay, &spec.RollbackConfig.Delay)
diff --git a/cli/command/system/inspect.go b/cli/command/system/inspect.go
index 361902a..ad23d35 100644
--- a/cli/command/system/inspect.go
+++ b/cli/command/system/inspect.go
@@ -4,6 +4,7 @@
 	"fmt"
 	"strings"
 
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command/inspect"
@@ -79,7 +80,8 @@
 
 func inspectService(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc {
 	return func(ref string) (interface{}, []byte, error) {
-		return dockerCli.Client().ServiceInspectWithRaw(ctx, ref)
+		// Service inspect shows default values in empty fields.
+		return dockerCli.Client().ServiceInspectWithRaw(ctx, ref, types.ServiceInspectOptions{InsertDefaults: true})
 	}
 }
 
diff --git a/client/interface.go b/client/interface.go
index 6f8c094..8dbe430 100644
--- a/client/interface.go
+++ b/client/interface.go
@@ -123,7 +123,7 @@
 // ServiceAPIClient defines API client methods for the services
 type ServiceAPIClient interface {
 	ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
-	ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error)
+	ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
 	ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
 	ServiceRemove(ctx context.Context, serviceID string) error
 	ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
diff --git a/client/service_inspect.go b/client/service_inspect.go
index ca71cbd..d7e051e 100644
--- a/client/service_inspect.go
+++ b/client/service_inspect.go
@@ -3,16 +3,21 @@
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	"io/ioutil"
 	"net/http"
+	"net/url"
 
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
 	"golang.org/x/net/context"
 )
 
 // ServiceInspectWithRaw returns the service information and the raw data.
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) {
-	serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil)
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
+	query := url.Values{}
+	query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
+	serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
 	if err != nil {
 		if serverResp.statusCode == http.StatusNotFound {
 			return swarm.Service{}, nil, serviceNotFoundError{serviceID}
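Callers now pass a `types.ServiceInspectOptions`; setting `InsertDefaults` adds `insertDefaults=true` to the query so the daemon fills empty spec fields. A minimal usage sketch, assuming a reachable daemon and an existing service named `web` (the service name is illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// InsertDefaults asks the daemon to fill empty spec fields with its
	// defaults, matching what `docker service inspect` now displays.
	service, _, err := cli.ServiceInspectWithRaw(context.Background(), "web",
		types.ServiceInspectOptions{InsertDefaults: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(service.Spec.Name)
}
```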
diff --git a/client/service_inspect_test.go b/client/service_inspect_test.go
index 0346847..d53f583 100644
--- a/client/service_inspect_test.go
+++ b/client/service_inspect_test.go
@@ -9,6 +9,7 @@
 	"strings"
 	"testing"
 
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
 	"golang.org/x/net/context"
 )
@@ -18,7 +19,7 @@
 		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 	}
 
-	_, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing")
+	_, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing", types.ServiceInspectOptions{})
 	if err == nil || err.Error() != "Error response from daemon: Server error" {
 		t.Fatalf("expected a Server Error, got %v", err)
 	}
@@ -29,7 +30,7 @@
 		client: newMockClient(errorMock(http.StatusNotFound, "Server error")),
 	}
 
-	_, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown")
+	_, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown", types.ServiceInspectOptions{})
 	if err == nil || !IsErrServiceNotFound(err) {
 		t.Fatalf("expected a serviceNotFoundError error, got %v", err)
 	}
@@ -55,7 +56,7 @@
 		}),
 	}
 
-	serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id")
+	serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id", types.ServiceInspectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/contrib/builder/deb/amd64/generate.sh b/contrib/builder/deb/amd64/generate.sh
index b72893b..38972ba 100755
--- a/contrib/builder/deb/amd64/generate.sh
+++ b/contrib/builder/deb/amd64/generate.sh
@@ -81,15 +81,15 @@
 	)
 	# packaging for "sd-journal.h" and libraries varies
 	case "$suite" in
-		precise|wheezy) ;;
-		jessie|trusty) packages+=( libsystemd-journal-dev );;
-		*) packages+=( libsystemd-dev );;
+		wheezy) ;;
+		jessie|trusty) packages+=( libsystemd-journal-dev ) ;;
+		*) packages+=( libsystemd-dev ) ;;
 	esac
 
-	# debian wheezy & ubuntu precise do not have the right libseccomp libs
+	# debian wheezy does not have the right libseccomp libs
 	# debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :(
 	case "$suite" in
-		precise|wheezy|jessie|trusty)
+		wheezy|jessie|trusty)
 			packages=( "${packages[@]/libseccomp-dev}" )
 			runcBuildTags="apparmor selinux"
 			;;
@@ -99,23 +99,6 @@
 			;;
 	esac
 
-
-	if [ "$suite" = 'precise' ]; then
-		# precise has a few package issues
-
-		# - dh-systemd doesn't exist at all
-		packages=( "${packages[@]/dh-systemd}" )
-
-		# - libdevmapper-dev is missing critical structs (too old)
-		packages=( "${packages[@]/libdevmapper-dev}" )
-		extraBuildTags+=' exclude_graphdriver_devicemapper'
-
-		# - btrfs-tools is missing "ioctl.h" (too old), so it's useless
-		#   (since kernels on precise are old too, just skip btrfs entirely)
-		packages=( "${packages[@]/btrfs-tools}" )
-		extraBuildTags+=' exclude_graphdriver_btrfs'
-	fi
-
 	if [ "$suite" = 'wheezy' ]; then
 		# pull a couple packages from backports explicitly
 		# (build failures otherwise)
diff --git a/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile b/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile
deleted file mode 100644
index f183ef2..0000000
--- a/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"!
-#
-
-FROM ubuntu:precise
-
-RUN apt-get update && apt-get install -y apparmor bash-completion  build-essential cmake curl ca-certificates debhelper dh-apparmor  git libapparmor-dev  libltdl-dev  pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
-
-ENV GO_VERSION 1.7.5
-RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
-ENV PATH $PATH:/usr/local/go/bin
-
-ENV AUTO_GOPATH 1
-
-ENV DOCKER_BUILDTAGS apparmor exclude_graphdriver_btrfs exclude_graphdriver_devicemapper pkcs11 selinux
-ENV RUNC_BUILDTAGS apparmor selinux
diff --git a/contrib/builder/deb/armhf/generate.sh b/contrib/builder/deb/armhf/generate.sh
index dcd135a..9bb943e 100755
--- a/contrib/builder/deb/armhf/generate.sh
+++ b/contrib/builder/deb/armhf/generate.sh
@@ -83,15 +83,15 @@
 	)
 	# packaging for "sd-journal.h" and libraries varies
 	case "$suite" in
-		precise|wheezy) ;;
-		jessie|trusty) packages+=( libsystemd-journal-dev );;
-		*) packages+=( libsystemd-dev );;
+		wheezy) ;;
+		jessie|trusty) packages+=( libsystemd-journal-dev ) ;;
+		*) packages+=( libsystemd-dev ) ;;
 	esac
 
-	# debian wheezy & ubuntu precise do not have the right libseccomp libs
+	# debian wheezy does not have the right libseccomp libs
 	# debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :(
 	case "$suite" in
-		precise|wheezy|jessie|trusty)
+		wheezy|jessie|trusty)
 			packages=( "${packages[@]/libseccomp-dev}" )
 			runcBuildTags="apparmor selinux"
 			;;
@@ -101,23 +101,6 @@
 			;;
 	esac
 
-
-	if [ "$suite" = 'precise' ]; then
-		# precise has a few package issues
-
-		# - dh-systemd doesn't exist at all
-		packages=( "${packages[@]/dh-systemd}" )
-
-		# - libdevmapper-dev is missing critical structs (too old)
-		packages=( "${packages[@]/libdevmapper-dev}" )
-		extraBuildTags+=' exclude_graphdriver_devicemapper'
-
-		# - btrfs-tools is missing "ioctl.h" (too old), so it's useless
-		#   (since kernels on precise are old too, just skip btrfs entirely)
-		packages=( "${packages[@]/btrfs-tools}" )
-		extraBuildTags+=' exclude_graphdriver_btrfs'
-	fi
-
 	if [ "$suite" = 'wheezy' ]; then
 		# pull a couple packages from backports explicitly
 		# (build failures otherwise)
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index d6bec28..c5cbe1b 100644
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -4078,11 +4078,7 @@
 			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
 			;;
 		*)
-			local counter=$(__docker_pos_first_nonflag)
-			if [ $cword -eq $counter ]; then
-				__docker_complete_stacks
-			fi
-			;;
+			__docker_complete_stacks
 	esac
 }
 
diff --git a/daemon/cluster/convert/container.go b/daemon/cluster/convert/container.go
index b2ccc9f..99753c8 100644
--- a/daemon/cluster/convert/container.go
+++ b/daemon/cluster/convert/container.go
@@ -281,7 +281,7 @@
 func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig {
 	interval, _ := gogotypes.DurationFromProto(h.Interval)
 	timeout, _ := gogotypes.DurationFromProto(h.Timeout)
-	startPeriod, _ := gogotypes.DurationFromProto(h.Timeout)
+	startPeriod, _ := gogotypes.DurationFromProto(h.StartPeriod)
 	return &container.HealthConfig{
 		Test:        h.Test,
 		Interval:    interval,
diff --git a/daemon/cluster/helpers.go b/daemon/cluster/helpers.go
index 6523a80..98c7cc5 100644
--- a/daemon/cluster/helpers.go
+++ b/daemon/cluster/helpers.go
@@ -58,9 +58,9 @@
 	return rl.Nodes[0], nil
 }
 
-func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) {
+func getService(ctx context.Context, c swarmapi.ControlClient, input string, insertDefaults bool) (*swarmapi.Service, error) {
 	// GetService to match via full ID.
-	if rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input}); err == nil {
+	if rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input, InsertDefaults: insertDefaults}); err == nil {
 		return rg.Service, nil
 	}
 
@@ -91,7 +91,15 @@
 		return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l)
 	}
 
-	return rl.Services[0], nil
+	if !insertDefaults {
+		return rl.Services[0], nil
+	}
+
+	rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: rl.Services[0].ID, InsertDefaults: true})
+	if err == nil {
+		return rg.Service, nil
+	}
+	return nil, err
 }
 
 func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) {
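`getService` tries the input as an exact ID first, then falls back to a name or ID-prefix lookup; when defaults are requested for a non-ID match it issues a second `GetService` call with the resolved ID so the defaults-filled object is returned. A stand-alone sketch of that resolution order, using a hypothetical in-memory store in place of swarmkit's `ControlClient`:

```go
package main

import (
	"errors"
	"fmt"
)

type Service struct{ ID, Name string }

// Store is a stand-in for the control client: ByID corresponds to GetService
// (which can insert defaults), ByName to the list-based name lookup.
type Store interface {
	ByID(id string, insertDefaults bool) (*Service, error)
	ByName(name string) (*Service, error)
}

func getService(s Store, input string, insertDefaults bool) (*Service, error) {
	if svc, err := s.ByID(input, insertDefaults); err == nil {
		return svc, nil // exact ID match
	}
	svc, err := s.ByName(input)
	if err != nil {
		return nil, err
	}
	if !insertDefaults {
		return svc, nil
	}
	// Re-fetch by the resolved ID so defaults can be filled in.
	return s.ByID(svc.ID, true)
}

type memStore map[string]*Service

func (m memStore) ByID(id string, _ bool) (*Service, error) {
	if svc, ok := m[id]; ok {
		return svc, nil
	}
	return nil, errors.New("no such service")
}

func (m memStore) ByName(name string) (*Service, error) {
	for _, svc := range m {
		if svc.Name == name {
			return svc, nil
		}
	}
	return nil, errors.New("no such service")
}

func main() {
	store := memStore{"abc123": {ID: "abc123", Name: "web"}}
	svc, err := getService(store, "web", true)
	fmt.Println(svc, err) // &{abc123 web} <nil>
}
```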
diff --git a/daemon/cluster/services.go b/daemon/cluster/services.go
index 8fd730e..8d5d4a5 100644
--- a/daemon/cluster/services.go
+++ b/daemon/cluster/services.go
@@ -87,10 +87,10 @@
 }
 
 // GetService returns a service based on an ID or name.
-func (c *Cluster) GetService(input string) (types.Service, error) {
+func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) {
 	var service *swarmapi.Service
 	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
-		s, err := getService(ctx, state.controlClient, input)
+		s, err := getService(ctx, state.controlClient, input, insertDefaults)
 		if err != nil {
 			return err
 		}
@@ -187,7 +187,7 @@
 			return apierrors.NewBadRequestError(err)
 		}
 
-		currentService, err := getService(ctx, state.controlClient, serviceIDOrName)
+		currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false)
 		if err != nil {
 			return err
 		}
@@ -289,7 +289,7 @@
 // RemoveService removes a service from a managed swarm cluster.
 func (c *Cluster) RemoveService(input string) error {
 	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
-		service, err := getService(ctx, state.controlClient, input)
+		service, err := getService(ctx, state.controlClient, input, false)
 		if err != nil {
 			return err
 		}
@@ -442,7 +442,7 @@
 	// don't rely on swarmkit to resolve IDs, do it ourselves
 	swarmSelector := &swarmapi.LogSelector{}
 	for _, s := range selector.Services {
-		service, err := getService(ctx, cc, s)
+		service, err := getService(ctx, cc, s, false)
 		if err != nil {
 			return nil, err
 		}
diff --git a/daemon/cluster/tasks.go b/daemon/cluster/tasks.go
index 001a345..6a6c59f 100644
--- a/daemon/cluster/tasks.go
+++ b/daemon/cluster/tasks.go
@@ -23,7 +23,7 @@
 		if filter.Include("service") {
 			serviceFilters := filter.Get("service")
 			for _, serviceFilter := range serviceFilters {
-				service, err := c.GetService(serviceFilter)
+				service, err := c.GetService(serviceFilter, false)
 				if err != nil {
 					return err
 				}
diff --git a/docs/api/version-history.md b/docs/api/version-history.md
index 5bf2460..d89cbcf 100644
--- a/docs/api/version-history.md
+++ b/docs/api/version-history.md
@@ -22,7 +22,7 @@
 * `POST /networks/create` now supports creating the ingress network, by specifying an `Ingress` boolean field. As of now this is supported only when using the overlay network driver.
 * `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one.
 * `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`).
-* `POST /containers/create`, `POST /service/create` and `POST /services/(id or name)/update` now takes the field `StartPeriod` as a part of the `HealthConfig` allowing for specification of a period during which the container should not be considered unealthy even if health checks do not pass.
+* `POST /containers/create`, `POST /services/create` and `POST /services/(id or name)/update` now take the field `StartPeriod` as part of the `HealthConfig`, allowing specification of a period during which the container should not be considered unhealthy even if health checks do not pass.
 
 ## v1.28 API changes
 
diff --git a/docs/reference/builder.md b/docs/reference/builder.md
index 523fd2f..9c28e8b 100644
--- a/docs/reference/builder.md
+++ b/docs/reference/builder.md
@@ -135,9 +135,11 @@
 be UPPERCASE to distinguish them from arguments more easily.
 
 
-Docker runs instructions in a `Dockerfile` in order. **The first
-instruction must be \`FROM\`** in order to specify the [*Base
-Image*](glossary.md#base-image) from which you are building.
+Docker runs instructions in a `Dockerfile` in order. A `Dockerfile` **must
+start with a \`FROM\` instruction**. The `FROM` instruction specifies the [*Base
+Image*](glossary.md#base-image) from which you are building. `FROM` may only be
+proceeded by one or more `ARG` instructions, which declare arguments that are used
+in `FROM` lines in the `Dockerfile`.
 
 Docker treats lines that *begin* with `#` as a comment, unless the line is
 a valid [parser directive](#parser-directives). A `#` marker anywhere
@@ -356,11 +358,12 @@
 * `COPY`
 * `ENV`
 * `EXPOSE`
+* `FROM`
 * `LABEL`
-* `USER`
-* `WORKDIR`
-* `VOLUME`
 * `STOPSIGNAL`
+* `USER`
+* `VOLUME`
+* `WORKDIR`
 
 as well as:
 
@@ -371,14 +374,14 @@
 > variable, even when combined with any of the instructions listed above.
 
 Environment variable substitution will use the same value for each variable
-throughout the entire command. In other words, in this example:
+throughout the entire instruction. In other words, in this example:
 
     ENV abc=hello
     ENV abc=bye def=$abc
     ENV ghi=$abc
 
 will result in `def` having a value of `hello`, not `bye`. However,
-`ghi` will have a value of `bye` because it is not part of the same command
+`ghi` will have a value of `bye` because it is not part of the same instruction
 that set `abc` to `bye`.
 
 ## .dockerignore file
@@ -469,7 +472,7 @@
 
 You can even use the `.dockerignore` file to exclude the `Dockerfile`
 and `.dockerignore` files.  These files are still sent to the daemon
-because it needs them to do its job.  But the `ADD` and `COPY` commands
+because it needs them to do its job.  But the `ADD` and `COPY` instructions
 do not copy them to the image.
 
 Finally, you may want to specify which files to include in the
@@ -492,24 +495,40 @@
 
 The `FROM` instruction initializes a new build stage and sets the 
 [*Base Image*](glossary.md#base-image) for subsequent instructions. As such, a 
-valid `Dockerfile` must have `FROM` as its first instruction. The image can be
+valid `Dockerfile` must start with a `FROM` instruction. The image can be
 any valid image – it is especially easy to start by **pulling an image** from 
 the [*Public Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/).
 
-- `FROM` must be the first non-comment instruction in the `Dockerfile`.
+- `ARG` is the only instruction that may precede `FROM` in the `Dockerfile`.
+  See [Understand how ARG and FROM interact](#understand-how-arg-and-from-interact).
 
-- `FROM` can appear multiple times within a single `Dockerfile` in order to 
-create multiple images or use one build stage as a dependency for another. 
-Simply make a note of the last image ID output by the commit before each new 
-`FROM` command. Each `FROM` command resets all the previous commands.
+- `FROM` can appear multiple times within a single `Dockerfile` to 
+  create multiple images or use one build stage as a dependency for another.
+  Simply make a note of the last image ID output by the commit before each new 
+  `FROM` instruction. Each `FROM` instruction clears any state created by previous
+  instructions.
 
-- Optionally a name can be given to a new build stage. That name can be then
-used in subsequent `FROM` and `COPY --from=<name|index>` commands to refer back
-to the image built in this stage.
+- Optionally a name can be given to a new build stage by adding `AS name` to the 
+  `FROM` instruction. The name can be used in subsequent `FROM` and
+  `COPY --from=<name|index>` instructions to refer to the image built in this stage.
 
 - The `tag` or `digest` values are optional. If you omit either of them, the 
-builder assumes a `latest` tag by default. The builder returns an error if it
-cannot match the `tag` value.
+  builder assumes a `latest` tag by default. The builder returns an error if it
+  cannot find the `tag` value.
+
+### Understand how ARG and FROM interact
+
+`FROM` instructions support variables that are declared by any `ARG` 
+instructions that occur before the first `FROM`.
+
+```Dockerfile
+ARG  CODE_VERSION=latest
+FROM base:${CODE_VERSION}
+CMD  /code/run-app
+
+FROM extras:${CODE_VERSION}
+CMD  /code/run-extras
+```
 
 ## RUN
 
@@ -947,7 +966,7 @@
 the source location to a previous build stage (created with `FROM .. AS <name>`)
 that will be used instead of a build context sent by the user. The flag also 
 accepts a numeric index assigned for all previous build stages started with 
-`FROM` command. In case a build stage with a specified name can't be found an 
+`FROM` instruction. If a build stage with a specified name can't be found, an 
 image with the same name is attempted to be used instead.
 
 `COPY` obeys the following rules:
@@ -1353,7 +1372,7 @@
 A user builds this file by calling:
 
 ```
-$ docker build --build-arg user=what_user Dockerfile
+$ docker build --build-arg user=what_user .
 ```
 
 The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the
@@ -1379,7 +1398,7 @@
 Then, assume this image is built with this command:
 
 ```
-$ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile
+$ docker build --build-arg CONT_IMG_VER=v2.0.1 .
 ```
 
 In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting
@@ -1401,7 +1420,7 @@
 image. Consider a docker build without the `--build-arg` flag:
 
 ```
-$ docker build Dockerfile
+$ docker build .
 ```
 
 Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but
diff --git a/docs/reference/commandline/service_create.md b/docs/reference/commandline/service_create.md
index 9490f1b..082dffb 100644
--- a/docs/reference/commandline/service_create.md
+++ b/docs/reference/commandline/service_create.md
@@ -21,61 +21,60 @@
 Create a new service
 
 Options:
-      --constraint list                    Placement constraints (default [])
-      --container-label list               Container labels (default [])
-  -d, --detach                             Exit immediately instead of waiting for the service to converge
-                                           (default true)
-      --dns list                           Set custom DNS servers (default [])
-      --dns-option list                    Set DNS options (default [])
-      --dns-search list                    Set custom DNS search domains (default [])
-      --endpoint-mode string               Endpoint mode ("vip"|"dnsrr") (default "vip")
-  -e, --env list                           Set environment variables (default [])
-      --env-file list                      Read in a file of environment variables (default [])
-      --group list                         Set one or more supplementary user groups for the container (default [])
+      --constraint list                    Placement constraints
+      --container-label list               Container labels
+  -d, --detach                             Exit immediately instead of waiting for the service to converge (default true)
+      --dns list                           Set custom DNS servers
+      --dns-option list                    Set DNS options
+      --dns-search list                    Set custom DNS search domains
+      --endpoint-mode string               Endpoint mode (vip or dnsrr) (default "vip")
+      --entrypoint command                 Overwrite the default ENTRYPOINT of the image
+  -e, --env list                           Set environment variables
+      --env-file list                      Read in a file of environment variables
+      --group list                         Set one or more supplementary user groups for the container
       --health-cmd string                  Command to run to check health
       --health-interval duration           Time between running the check (ns|us|ms|s|m|h)
       --health-retries int                 Consecutive failures needed to report unhealthy
+      --health-start-period duration       Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h)
       --health-timeout duration            Maximum time to allow one check to run (ns|us|ms|s|m|h)
-      --health-start-period duration       Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s)
       --help                               Print usage
-      --host list                          Set one or more custom host-to-IP mappings (host:ip) (default [])
+      --host list                          Set one or more custom host-to-IP mappings (host:ip)
       --hostname string                    Container hostname
-  -l, --label list                         Service labels (default [])
-      --limit-cpu decimal                  Limit CPUs (default 0.000)
+  -l, --label list                         Service labels
+      --limit-cpu decimal                  Limit CPUs
       --limit-memory bytes                 Limit Memory
       --log-driver string                  Logging driver for service
-      --log-opt list                       Logging driver options (default [])
+      --log-opt list                       Logging driver options
       --mode string                        Service mode (replicated or global) (default "replicated")
       --mount mount                        Attach a filesystem mount to the service
       --name string                        Service name
-      --network list                       Network attachments (default [])
+      --network list                       Network attachments
       --no-healthcheck                     Disable any container-specified HEALTHCHECK
       --placement-pref pref                Add a placement preference
   -p, --publish port                       Publish a port as a node port
+  -q, --quiet                              Suppress progress output
       --read-only                          Mount the container's root filesystem as read only
       --replicas uint                      Number of tasks
-      --reserve-cpu decimal                Reserve CPUs (default 0.000)
+      --reserve-cpu decimal                Reserve CPUs
       --reserve-memory bytes               Reserve Memory
-      --restart-condition string           Restart when condition is met ("none"|"on-failure"|"any")
-      --restart-delay duration             Delay between restart attempts (ns|us|ms|s|m|h)
+      --restart-condition string           Restart when condition is met ("none"|"on-failure"|"any") (default "any")
+      --restart-delay duration             Delay between restart attempts (ns|us|ms|s|m|h) (default 5s)
       --restart-max-attempts uint          Maximum number of restarts before giving up
       --restart-window duration            Window used to evaluate the restart policy (ns|us|ms|s|m|h)
       --rollback-delay duration            Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s)
       --rollback-failure-action string     Action on rollback failure ("pause"|"continue") (default "pause")
-      --rollback-max-failure-ratio float   Failure rate to tolerate during a rollback
-      --rollback-monitor duration          Duration after each task rollback to monitor for failure
-                                           (ns|us|ms|s|m|h) (default 0s)
+      --rollback-max-failure-ratio float   Failure rate to tolerate during a rollback (default 0)
+      --rollback-monitor duration          Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s)
       --rollback-order string              Rollback order ("start-first"|"stop-first") (default "stop-first")
-      --rollback-parallelism uint          Maximum number of tasks rolled back simultaneously (0 to roll
-                                           back all at once) (default 1)
+      --rollback-parallelism uint          Maximum number of tasks rolled back simultaneously (0 to roll back all at once) (default 1)
       --secret secret                      Specify secrets to expose to the service
-      --stop-grace-period duration         Time to wait before force killing a container (ns|us|ms|s|m|h)
+      --stop-grace-period duration         Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s)
       --stop-signal string                 Signal to stop the container
   -t, --tty                                Allocate a pseudo-TTY
       --update-delay duration              Delay between updates (ns|us|ms|s|m|h) (default 0s)
       --update-failure-action string       Action on update failure ("pause"|"continue"|"rollback") (default "pause")
-      --update-max-failure-ratio float     Failure rate to tolerate during an update
-      --update-monitor duration            Duration after each task update to monitor for failure (ns|us|ms|s|m|h)
+      --update-max-failure-ratio float     Failure rate to tolerate during an update (default 0)
+      --update-monitor duration            Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 5s)
       --update-order string                Update order ("start-first"|"stop-first") (default "stop-first")
       --update-parallelism uint            Maximum number of tasks updated simultaneously (0 to update all at once) (default 1)
   -u, --user string                        Username or UID (format: <name|uid>[:<group|gid>])
diff --git a/docs/reference/commandline/service_update.md b/docs/reference/commandline/service_update.md
index f79caeb..fae6b0a 100644
--- a/docs/reference/commandline/service_update.md
+++ b/docs/reference/commandline/service_update.md
@@ -21,43 +21,43 @@
 Update a service
 
 Options:
-      --args string                        Service command args
-      --constraint-add list                Add or update a placement constraint (default [])
-      --constraint-rm list                 Remove a constraint (default [])
-      --container-label-add list           Add or update a container label (default [])
-      --container-label-rm list            Remove a container label by its key (default [])
-  -d, --detach                             Exit immediately instead of waiting for the service to converge
-                                           (default true)
-      --dns-add list                       Add or update a custom DNS server (default [])
-      --dns-option-add list                Add or update a DNS option (default [])
-      --dns-option-rm list                 Remove a DNS option (default [])
-      --dns-rm list                        Remove a custom DNS server (default [])
-      --dns-search-add list                Add or update a custom DNS search domain (default [])
-      --dns-search-rm list                 Remove a DNS search domain (default [])
-      --endpoint-mode string               Endpoint mode ("vip"|"dnsrr") (default "vip")
-      --env-add list                       Add or update an environment variable (default [])
-      --env-rm list                        Remove an environment variable (default [])
+      --args command                       Service command args
+      --constraint-add list                Add or update a placement constraint
+      --constraint-rm list                 Remove a constraint
+      --container-label-add list           Add or update a container label
+      --container-label-rm list            Remove a container label by its key
+  -d, --detach                             Exit immediately instead of waiting for the service to converge (default true)
+      --dns-add list                       Add or update a custom DNS server
+      --dns-option-add list                Add or update a DNS option
+      --dns-option-rm list                 Remove a DNS option
+      --dns-rm list                        Remove a custom DNS server
+      --dns-search-add list                Add or update a custom DNS search domain
+      --dns-search-rm list                 Remove a DNS search domain
+      --endpoint-mode string               Endpoint mode (vip or dnsrr)
+      --entrypoint command                 Overwrite the default ENTRYPOINT of the image
+      --env-add list                       Add or update an environment variable
+      --env-rm list                        Remove an environment variable
       --force                              Force update even if no changes require it
-      --group-add list                     Add an additional supplementary user group to the container (default [])
-      --group-rm list                      Remove a previously added supplementary user group from the container (default [])
+      --group-add list                     Add an additional supplementary user group to the container
+      --group-rm list                      Remove a previously added supplementary user group from the container
       --health-cmd string                  Command to run to check health
       --health-interval duration           Time between running the check (ns|us|ms|s|m|h)
       --health-retries int                 Consecutive failures needed to report unhealthy
+      --health-start-period duration       Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h)
       --health-timeout duration            Maximum time to allow one check to run (ns|us|ms|s|m|h)
-      --health-start-period duration       Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s)
       --help                               Print usage
-      --host-add list                      Add or update a custom host-to-IP mapping (host:ip) (default [])
-      --host-rm list                       Remove a custom host-to-IP mapping (host:ip) (default [])
+      --host-add list                      Add or update a custom host-to-IP mapping (host:ip)
+      --host-rm list                       Remove a custom host-to-IP mapping (host:ip)
       --hostname string                    Container hostname
       --image string                       Service image tag
-      --label-add list                     Add or update a service label (default [])
-      --label-rm list                      Remove a label by its key (default [])
-      --limit-cpu decimal                  Limit CPUs (default 0.000)
+      --label-add list                     Add or update a service label
+      --label-rm list                      Remove a label by its key
+      --limit-cpu decimal                  Limit CPUs
       --limit-memory bytes                 Limit Memory
       --log-driver string                  Logging driver for service
-      --log-opt list                       Logging driver options (default [])
+      --log-opt list                       Logging driver options
       --mount-add mount                    Add or update a mount on a service
-      --mount-rm list                      Remove a mount by its target path (default [])
+      --mount-rm list                      Remove a mount by its target path
       --network-add list                   Add a network
       --network-rm list                    Remove a network
       --no-healthcheck                     Disable any container-specified HEALTHCHECK
@@ -65,34 +65,33 @@
       --placement-pref-rm pref             Remove a placement preference
       --publish-add port                   Add or update a published port
       --publish-rm port                    Remove a published port by its target port
+  -q, --quiet                              Suppress progress output
       --read-only                          Mount the container's root filesystem as read only
       --replicas uint                      Number of tasks
-      --reserve-cpu decimal                Reserve CPUs (default 0.000)
+      --reserve-cpu decimal                Reserve CPUs
       --reserve-memory bytes               Reserve Memory
       --restart-condition string           Restart when condition is met ("none"|"on-failure"|"any")
       --restart-delay duration             Delay between restart attempts (ns|us|ms|s|m|h)
       --restart-max-attempts uint          Maximum number of restarts before giving up
       --restart-window duration            Window used to evaluate the restart policy (ns|us|ms|s|m|h)
       --rollback                           Rollback to previous specification
-      --rollback-delay duration            Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s)
-      --rollback-failure-action string     Action on rollback failure ("pause"|"continue") (default "pause")
+      --rollback-delay duration            Delay between task rollbacks (ns|us|ms|s|m|h)
+      --rollback-failure-action string     Action on rollback failure ("pause"|"continue")
       --rollback-max-failure-ratio float   Failure rate to tolerate during a rollback
-      --rollback-monitor duration          Duration after each task rollback to monitor for failure
-                                           (ns|us|ms|s|m|h) (default 0s)
+      --rollback-monitor duration          Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)
       --rollback-order string              Rollback order ("start-first"|"stop-first") (default "stop-first")
-      --rollback-parallelism uint          Maximum number of tasks rolled back simultaneously (0 to roll
-                                           back all at once) (default 1)
+      --rollback-parallelism uint          Maximum number of tasks rolled back simultaneously (0 to roll back all at once)
       --secret-add secret                  Add or update a secret on a service
-      --secret-rm list                     Remove a secret (default [])
+      --secret-rm list                     Remove a secret
       --stop-grace-period duration         Time to wait before force killing a container (ns|us|ms|s|m|h)
       --stop-signal string                 Signal to stop the container
   -t, --tty                                Allocate a pseudo-TTY
-      --update-delay duration              Delay between updates (ns|us|ms|s|m|h) (default 0s)
-      --update-failure-action string       Action on update failure ("pause"|"continue"|"rollback") (default "pause")
+      --update-delay duration              Delay between updates (ns|us|ms|s|m|h)
+      --update-failure-action string       Action on update failure ("pause"|"continue"|"rollback")
       --update-max-failure-ratio float     Failure rate to tolerate during an update
-      --update-monitor duration            Duration after each task update to monitor for failure (ns|us|ms|s|m|h) 
-      --update-order string                Update order ("start-first"|"stop-first") (default "stop-first")
-      --update-parallelism uint            Maximum number of tasks updated simultaneously (0 to update all at once) (default 1)
+      --update-monitor duration            Duration after each task update to monitor for failure (ns|us|ms|s|m|h)
+      --update-order string                Update order ("start-first"|"stop-first")
+      --update-parallelism uint            Maximum number of tasks updated simultaneously (0 to update all at once)
   -u, --user string                        Username or UID (format: <name|uid>[:<group|gid>])
       --with-registry-auth                 Send registry authentication details to swarm agents
   -w, --workdir string                     Working directory inside the container
diff --git a/integration-cli/docker_api_swarm_service_test.go b/integration-cli/docker_api_swarm_service_test.go
index a96f684..6a3c9f1 100644
--- a/integration-cli/docker_api_swarm_service_test.go
+++ b/integration-cli/docker_api_swarm_service_test.go
@@ -60,6 +60,16 @@
 	id := d.CreateService(c, simpleTestService, setInstances(instances))
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 
+	// insertDefaults inserts UpdateConfig when service is fetched by ID
+	_, out, err := d.SockRequest("GET", "/services/"+id+"?insertDefaults=true", nil)
+	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
+	c.Assert(string(out), checker.Contains, "UpdateConfig")
+
+	// insertDefaults inserts UpdateConfig when service is fetched by name
+	_, out, err = d.SockRequest("GET", "/services/top?insertDefaults=true", nil)
+	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
+	c.Assert(string(out), checker.Contains, "UpdateConfig")
+
 	service := d.GetService(c, id)
 	instances = 5
 	d.UpdateService(c, service, setInstances(instances))
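
For completeness, the new insertDefaults query parameter can also be exercised outside the test harness by talking HTTP over the daemon socket. A minimal sketch, assuming the default unix socket path and a service named "top" (both are illustrative, not taken from this patch):

    package main

    import (
        "context"
        "fmt"
        "io/ioutil"
        "net"
        "net/http"
    )

    func main() {
        // Dial the local daemon over its unix socket; the path is an assumption.
        client := &http.Client{
            Transport: &http.Transport{
                DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                    return net.Dial("unix", "/var/run/docker.sock")
                },
            },
        }
        // insertDefaults=true asks the daemon to fill empty fields (e.g. UpdateConfig)
        // with their default values before returning the service.
        resp, err := client.Get("http://unix/services/top?insertDefaults=true")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := ioutil.ReadAll(resp.Body)
        fmt.Println(string(body))
    }
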
diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go
index c5e233f..f95dcad 100644
--- a/integration-cli/docker_cli_network_unix_test.go
+++ b/integration-cli/docker_cli_network_unix_test.go
@@ -12,6 +12,7 @@
 	"os"
 	"path/filepath"
 	"strings"
+	"syscall"
 	"time"
 
 	"github.com/docker/docker/api/types"
@@ -1789,3 +1790,56 @@
 	_, _, err := dockerCmdWithError("network", "disconnect", network, name)
 	c.Assert(err, check.IsNil)
 }
+
+// TestConntrackFlowsLeak covers the failure scenario of ticket: https://github.com/docker/docker/issues/8795
+// Validates that conntrack is correctly cleaned once a container is destroyed
+func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) {
+	testRequires(c, IsAmd64, DaemonIsLinux, Network)
+
+	// Create a new network
+	dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind")
+	assertNwIsAvailable(c, "testbind")
+
+	// Launch the server; it will keep listening on an exposed port and reply to any request in a ping/pong fashion
+	cmd := "while true; do echo hello | nc -w 1 -lu 8080; done"
+	_, _, err := dockerCmdWithError("run", "-d", "--name", "server", "--net", "testbind", "-p", "8080:8080/udp", "appropriate/nc", "sh", "-c", cmd)
+	c.Assert(err, check.IsNil)
+
+	// Launch a client container; the objective is to create a flow that is NATed, in order to expose the bug
+	cmd = "echo world | nc -q 1 -u 192.168.10.1 8080"
+	_, _, err = dockerCmdWithError("run", "-d", "--name", "client", "--net=host", "appropriate/nc", "sh", "-c", cmd)
+	c.Assert(err, check.IsNil)
+
+	// Get all the flows using netlink
+	flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET)
+	c.Assert(err, check.IsNil)
+	var flowMatch int
+	for _, flow := range flows {
+		// count only the flows that we are interested in, skipping others that can be lying around the host
+		if flow.Forward.Protocol == syscall.IPPROTO_UDP &&
+			flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) &&
+			flow.Forward.DstPort == 8080 {
+			flowMatch++
+		}
+	}
+	// The client should have created only 1 flow
+	c.Assert(flowMatch, checker.Equals, 1)
+
+	// Now delete the server, this will trigger the conntrack cleanup
+	err = deleteContainer("server")
+	c.Assert(err, checker.IsNil)
+
+	// Fetch all the flows again and validate that there is no server flow left lying around in conntrack
+	flows, err = netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET)
+	c.Assert(err, check.IsNil)
+	flowMatch = 0
+	for _, flow := range flows {
+		if flow.Forward.Protocol == syscall.IPPROTO_UDP &&
+			flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) &&
+			flow.Forward.DstPort == 8080 {
+			flowMatch++
+		}
+	}
+	// All the flows have to be gone
+	c.Assert(flowMatch, checker.Equals, 0)
+}
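
The flow-counting loop in the test above could be factored into a small helper if more conntrack assertions are added later; a minimal sketch against the vendored netlink API (countUDPFlowsTo and the package name are hypothetical):

    package integration

    import (
        "net"
        "syscall"

        "github.com/vishvananda/netlink"
    )

    // countUDPFlowsTo counts conntrack flows whose original direction targets the
    // given UDP destination IP and port (mirrors the loop used in the test).
    func countUDPFlowsTo(dstIP net.IP, dstPort uint16) (int, error) {
        flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, syscall.AF_INET)
        if err != nil {
            return 0, err
        }
        count := 0
        for _, flow := range flows {
            if flow.Forward.Protocol == syscall.IPPROTO_UDP &&
                flow.Forward.DstIP.Equal(dstIP) &&
                flow.Forward.DstPort == dstPort {
                count++
            }
        }
        return count, nil
    }
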
diff --git a/opts/opts.go b/opts/opts.go
index 8c82960..f76f308 100644
--- a/opts/opts.go
+++ b/opts/opts.go
@@ -38,7 +38,10 @@
 }
 
 func (opts *ListOpts) String() string {
-	return fmt.Sprintf("%v", []string((*opts.values)))
+	if len(*opts.values) == 0 {
+		return ""
+	}
+	return fmt.Sprintf("%v", *opts.values)
 }
 
 // Set validates if needed the input value and adds it to the
@@ -343,6 +346,9 @@
 
 // String returns the string format of the number
 func (c *NanoCPUs) String() string {
+	if *c == 0 {
+		return ""
+	}
 	return big.NewRat(c.Value(), 1e9).FloatString(3)
 }
 
diff --git a/opts/opts_test.go b/opts/opts_test.go
index e137127..c1e7735 100644
--- a/opts/opts_test.go
+++ b/opts/opts_test.go
@@ -93,12 +93,12 @@
 	// Re-using logOptsvalidator (used by MapOpts)
 	o := NewListOpts(logOptsValidator)
 	o.Set("foo")
-	if o.String() != "[]" {
-		t.Errorf("%s != []", o.String())
+	if o.String() != "" {
+		t.Errorf(`%s != ""`, o.String())
 	}
 	o.Set("foo=bar")
-	if o.String() != "[]" {
-		t.Errorf("%s != []", o.String())
+	if o.String() != "" {
+		t.Errorf(`%s != ""`, o.String())
 	}
 	o.Set("max-file=2")
 	if o.Len() != 1 {
@@ -111,8 +111,8 @@
 		t.Error("o.Get(\"baz\") == true")
 	}
 	o.Delete("max-file=2")
-	if o.String() != "[]" {
-		t.Errorf("%s != []", o.String())
+	if o.String() != "" {
+		t.Errorf(`%s != ""`, o.String())
 	}
 }
 
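
The change to ListOpts.String (and NanoCPUs.String) is what drops the noisy "(default [])" and "(default 0.000)" suffixes from the help output shown in the docs diffs above: as far as I understand spf13/pflag's zero-value check, a custom Value whose String() is empty is treated as having a zero default and gets no "(default ...)" suffix. A minimal standalone sketch of that behaviour with a hypothetical list value:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    // listValue is a hypothetical pflag.Value behaving like opts.ListOpts:
    // String() returns "" while the list is empty.
    type listValue struct{ values []string }

    func (l *listValue) String() string {
        if len(l.values) == 0 {
            return ""
        }
        return fmt.Sprintf("%v", l.values)
    }

    func (l *listValue) Set(v string) error { l.values = append(l.values, v); return nil }
    func (l *listValue) Type() string       { return "list" }

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        fs.Var(&listValue{}, "dns-add", "Add or update a custom DNS server")
        // Because the default String() is empty, the usage line is printed
        // without a "(default [])" suffix.
        fmt.Print(fs.FlagUsages())
    }
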
diff --git a/vendor.conf b/vendor.conf
index e2aa557..aa8b2f7 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -24,7 +24,7 @@
 github.com/imdario/mergo 0.2.1
 
 #get libnetwork packages
-github.com/docker/libnetwork ab8f7e61743aa7e54c5d0dad0551543adadc33cf
+github.com/docker/libnetwork b13e0604016a4944025aaff521d9c125850b0d04
 github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -34,7 +34,7 @@
 github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
 github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
 github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-github.com/vishvananda/netlink c682914b0b231f6cad204a86e565551e51d387c0
+github.com/vishvananda/netlink 1e86b2bee5b6a7d377e4c02bb7f98209d6a7297c
 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
diff --git a/vendor/github.com/docker/libnetwork/controller.go b/vendor/github.com/docker/libnetwork/controller.go
index 0674af5..6a5eda0 100644
--- a/vendor/github.com/docker/libnetwork/controller.go
+++ b/vendor/github.com/docker/libnetwork/controller.go
@@ -47,6 +47,7 @@
 	"container/heap"
 	"fmt"
 	"net"
+	"path/filepath"
 	"strings"
 	"sync"
 	"time"
@@ -979,6 +980,8 @@
 
 	if sb.ingress {
 		c.ingressSandbox = sb
+		sb.config.hostsPath = filepath.Join(c.cfg.Daemon.DataDir, "/network/files/hosts")
+		sb.config.resolvConfPath = filepath.Join(c.cfg.Daemon.DataDir, "/network/files/resolv.conf")
 		sb.id = "ingress_sbox"
 	}
 	c.Unlock()
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
index 742816c..e681b8f 100644
--- a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -1346,6 +1346,13 @@
 
 	endpoint.portMapping = nil
 
+	// Clean the connection tracker state of the host for the specific endpoint
+	// The host kernel keeps track of the connections (TCP and UDP), so if a new endpoint gets the same IP as
+	// this one (which is going down), it is possible that some of the packets would not be routed correctly to
+	// the new endpoint
+	// Deeper details: https://github.com/docker/docker/issues/8795
+	clearEndpointConnections(d.nlh, endpoint)
+
 	if err = d.storeUpdate(endpoint); err != nil {
 		return fmt.Errorf("failed to update bridge endpoint %s to store: %v", endpoint.id[0:7], err)
 	}
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
index b2720c5..839e16f 100644
--- a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
@@ -7,6 +7,7 @@
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/libnetwork/iptables"
+	"github.com/vishvananda/netlink"
 )
 
 // DockerChain: DOCKER iptable chain name
@@ -348,3 +349,15 @@
 	}
 	return nil
 }
+
+func clearEndpointConnections(nlh *netlink.Handle, ep *bridgeEndpoint) {
+	var ipv4List []net.IP
+	var ipv6List []net.IP
+	if ep.addr != nil {
+		ipv4List = append(ipv4List, ep.addr.IP)
+	}
+	if ep.addrv6 != nil {
+		ipv6List = append(ipv6List, ep.addrv6.IP)
+	}
+	iptables.DeleteConntrackEntries(nlh, ipv4List, ipv6List)
+}
diff --git a/vendor/github.com/docker/libnetwork/endpoint.go b/vendor/github.com/docker/libnetwork/endpoint.go
index a9008e4..de63cf3 100644
--- a/vendor/github.com/docker/libnetwork/endpoint.go
+++ b/vendor/github.com/docker/libnetwork/endpoint.go
@@ -665,7 +665,7 @@
 
 func (ep *endpoint) Leave(sbox Sandbox, options ...EndpointOption) error {
 	if sbox == nil || sbox.ID() == "" || sbox.Key() == "" {
-		return types.BadRequestErrorf("invalid Sandbox passed to enpoint leave: %v", sbox)
+		return types.BadRequestErrorf("invalid Sandbox passed to endpoint leave: %v", sbox)
 	}
 
 	sb, ok := sbox.(*sandbox)
diff --git a/vendor/github.com/docker/libnetwork/error.go b/vendor/github.com/docker/libnetwork/error.go
index f62ac0c..5f00709 100644
--- a/vendor/github.com/docker/libnetwork/error.go
+++ b/vendor/github.com/docker/libnetwork/error.go
@@ -129,7 +129,7 @@
 }
 
 func (aee *ActiveEndpointsError) Error() string {
-	return fmt.Sprintf("network %s has active endpoints", aee.name)
+	return fmt.Sprintf("network %s id %s has active endpoints", aee.name, aee.id)
 }
 
 // Forbidden denotes the type of this error
diff --git a/vendor/github.com/docker/libnetwork/iptables/conntrack.go b/vendor/github.com/docker/libnetwork/iptables/conntrack.go
new file mode 100644
index 0000000..5731c53
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/iptables/conntrack.go
@@ -0,0 +1,59 @@
+package iptables
+
+import (
+	"errors"
+	"net"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/vishvananda/netlink"
+)
+
+var (
+	// ErrConntrackNotConfigurable means that conntrack module is not loaded or does not have the netlink module loaded
+	ErrConntrackNotConfigurable = errors.New("conntrack is not available")
+)
+
+// IsConntrackProgrammable returns true if the handle supports the NETLINK_NETFILTER and the base modules are loaded
+func IsConntrackProgrammable(nlh *netlink.Handle) bool {
+	return nlh.SupportsNetlinkFamily(syscall.NETLINK_NETFILTER)
+}
+
+// DeleteConntrackEntries deletes all the conntrack connections on the host for the specified IP
+// It returns the number of flows deleted for IPv4 and IPv6, or an error
+func DeleteConntrackEntries(nlh *netlink.Handle, ipv4List []net.IP, ipv6List []net.IP) (uint, uint, error) {
+	if !IsConntrackProgrammable(nlh) {
+		return 0, 0, ErrConntrackNotConfigurable
+	}
+
+	var totalIPv4FlowPurged uint
+	for _, ipAddress := range ipv4List {
+		flowPurged, err := purgeConntrackState(nlh, syscall.AF_INET, ipAddress)
+		if err != nil {
+			logrus.Warnf("Failed to delete conntrack state for %s: %v", ipAddress, err)
+			continue
+		}
+		totalIPv4FlowPurged += flowPurged
+	}
+
+	var totalIPv6FlowPurged uint
+	for _, ipAddress := range ipv6List {
+		flowPurged, err := purgeConntrackState(nlh, syscall.AF_INET6, ipAddress)
+		if err != nil {
+			logrus.Warnf("Failed to delete conntrack state for %s: %v", ipAddress, err)
+			continue
+		}
+		totalIPv6FlowPurged += flowPurged
+	}
+
+	logrus.Debugf("DeleteConntrackEntries purged ipv4:%d, ipv6:%d", totalIPv4FlowPurged, totalIPv6FlowPurged)
+	return totalIPv4FlowPurged, totalIPv6FlowPurged, nil
+}
+
+func purgeConntrackState(nlh *netlink.Handle, family netlink.InetFamily, ipAddress net.IP) (uint, error) {
+	filter := &netlink.ConntrackFilter{}
+	// NOTE: flushing by ipAddress is safe because today there cannot be multiple networks with the same subnet,
+	// so it is not possible to flush flows belonging to other containers
+	filter.AddIP(netlink.ConntrackNatAnyIP, ipAddress)
+	return nlh.ConntrackDeleteFilter(netlink.ConntrackTable, family, filter)
+}
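
A driver-side caller of the new helper only needs a netlink handle created with NETLINK_NETFILTER support and the endpoint's addresses. A hedged sketch (purgeEndpointFlows and the package name are hypothetical; the real caller in this patch is clearEndpointConnections in setup_ip_tables.go above):

    package driverutil

    import (
        "net"

        "github.com/Sirupsen/logrus"
        "github.com/docker/libnetwork/iptables"
        "github.com/vishvananda/netlink"
    )

    // purgeEndpointFlows removes any conntrack flows left behind for the given
    // endpoint addresses; nlh is assumed to be a handle that includes
    // NETLINK_NETFILTER (see the ns/init_linux.go changes below).
    func purgeEndpointFlows(nlh *netlink.Handle, v4, v6 []net.IP) {
        n4, n6, err := iptables.DeleteConntrackEntries(nlh, v4, v6)
        if err == iptables.ErrConntrackNotConfigurable {
            // conntrack or its netlink interface is not available on this host
            return
        }
        if err != nil {
            logrus.Warnf("conntrack cleanup failed: %v", err)
            return
        }
        logrus.Debugf("conntrack cleanup removed %d IPv4 and %d IPv6 flows", n4, n6)
    }
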
diff --git a/vendor/github.com/docker/libnetwork/iptables/iptables.go b/vendor/github.com/docker/libnetwork/iptables/iptables.go
index 34f7dee..818bcb5 100644
--- a/vendor/github.com/docker/libnetwork/iptables/iptables.go
+++ b/vendor/github.com/docker/libnetwork/iptables/iptables.go
@@ -100,14 +100,14 @@
 	supportsCOpt = supportsCOption(mj, mn, mc)
 }
 
-func initIptables() {
+func initDependencies() {
 	probe()
 	initFirewalld()
 	detectIptables()
 }
 
 func initCheck() error {
-	initOnce.Do(initIptables)
+	initOnce.Do(initDependencies)
 
 	if iptablesPath == "" {
 		return ErrIptablesNotFound
diff --git a/vendor/github.com/docker/libnetwork/networkdb/delegate.go b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
index 2f8ca48..2096ea6 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/delegate.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
@@ -88,12 +88,25 @@
 }
 
 func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
+	var flushEntries bool
 	// Update our local clock if the received messages has newer
 	// time.
 	nDB.networkClock.Witness(nEvent.LTime)
 
 	nDB.Lock()
-	defer nDB.Unlock()
+	defer func() {
+		nDB.Unlock()
+		// When a node leaves a network on the last task removal, clean up the
+		// local entries for this network & node combination. When the tasks
+		// on a network are removed we could have missed the gossip updates.
+		// Skipping this cleanup can leave stale entries, because bulk syncs
+		// from the node will no longer include this network's state.
+		//
+		// deleteNodeNetworkEntries takes nDB lock.
+		if flushEntries {
+			nDB.deleteNodeNetworkEntries(nEvent.NetworkID, nEvent.NodeName)
+		}
+	}()
 
 	if nEvent.NodeName == nDB.config.NodeName {
 		return false
@@ -121,6 +134,7 @@
 		n.leaving = nEvent.Type == NetworkEventTypeLeave
 		if n.leaving {
 			n.reapTime = reapInterval
+			flushEntries = true
 		}
 
 		nDB.addNetworkNode(nEvent.NetworkID, nEvent.NodeName)
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
index 9e5e61c..86b0128 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
@@ -372,6 +372,37 @@
 	nDB.Unlock()
 }
 
+func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
+	nDB.Lock()
+	nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid),
+		func(path string, v interface{}) bool {
+			oldEntry := v.(*entry)
+			params := strings.Split(path[1:], "/")
+			nid := params[0]
+			tname := params[1]
+			key := params[2]
+
+			if oldEntry.node != node {
+				return false
+			}
+
+			entry := &entry{
+				ltime:    oldEntry.ltime,
+				node:     node,
+				value:    oldEntry.value,
+				deleting: true,
+				reapTime: reapInterval,
+			}
+
+			nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
+			nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+
+			nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
+			return false
+		})
+	nDB.Unlock()
+}
+
 func (nDB *NetworkDB) deleteNodeTableEntries(node string) {
 	nDB.Lock()
 	nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
diff --git a/vendor/github.com/docker/libnetwork/ns/init_linux.go b/vendor/github.com/docker/libnetwork/ns/init_linux.go
index 2c3aff5..84d4950 100644
--- a/vendor/github.com/docker/libnetwork/ns/init_linux.go
+++ b/vendor/github.com/docker/libnetwork/ns/init_linux.go
@@ -75,13 +75,28 @@
 
 func getSupportedNlFamilies() []int {
 	fams := []int{syscall.NETLINK_ROUTE}
+	// NETLINK_XFRM test
 	if err := loadXfrmModules(); err != nil {
 		if checkXfrmSocket() != nil {
 			logrus.Warnf("Could not load necessary modules for IPSEC rules: %v", err)
-			return fams
+		} else {
+			fams = append(fams, syscall.NETLINK_XFRM)
 		}
+	} else {
+		fams = append(fams, syscall.NETLINK_XFRM)
 	}
-	return append(fams, syscall.NETLINK_XFRM)
+	// NETLINK_NETFILTER test
+	if err := loadNfConntrackModules(); err != nil {
+		if checkNfSocket() != nil {
+			logrus.Warnf("Could not load necessary modules for Conntrack: %v", err)
+		} else {
+			fams = append(fams, syscall.NETLINK_NETFILTER)
+		}
+	} else {
+		fams = append(fams, syscall.NETLINK_NETFILTER)
+	}
+
+	return fams
 }
 
 func loadXfrmModules() error {
@@ -103,3 +118,23 @@
 	syscall.Close(fd)
 	return nil
 }
+
+func loadNfConntrackModules() error {
+	if out, err := exec.Command("modprobe", "-va", "nf_conntrack").CombinedOutput(); err != nil {
+		return fmt.Errorf("Running modprobe nf_conntrack failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err)
+	}
+	if out, err := exec.Command("modprobe", "-va", "nf_conntrack_netlink").CombinedOutput(); err != nil {
+		return fmt.Errorf("Running modprobe nf_conntrack_netlink failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err)
+	}
+	return nil
+}
+
+// API check on required nf_conntrack* modules (nf_conntrack, nf_conntrack_netlink)
+func checkNfSocket() error {
+	fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_NETFILTER)
+	if err != nil {
+		return err
+	}
+	syscall.Close(fd)
+	return nil
+}
diff --git a/vendor/github.com/docker/libnetwork/sandbox.go b/vendor/github.com/docker/libnetwork/sandbox.go
index a2811af..c820cc0 100644
--- a/vendor/github.com/docker/libnetwork/sandbox.go
+++ b/vendor/github.com/docker/libnetwork/sandbox.go
@@ -644,13 +644,6 @@
 	sb.Lock()
 	sb.osSbox = osSbox
 	sb.Unlock()
-	defer func() {
-		if err != nil {
-			sb.Lock()
-			sb.osSbox = nil
-			sb.Unlock()
-		}
-	}()
 
 	// If the resolver was setup before stop it and set it up in the
 	// new osl sandbox.
diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go
index 5348e403..220f0f2 100644
--- a/vendor/github.com/vishvananda/netlink/addr_linux.go
+++ b/vendor/github.com/vishvananda/netlink/addr_linux.go
@@ -27,6 +27,19 @@
 	return h.addrHandle(link, addr, req)
 }
 
+// AddrReplace will replace (or, if not present, add) an IP address on a link device.
+// Equivalent to: `ip addr replace $addr dev $link`
+func AddrReplace(link Link, addr *Addr) error {
+	return pkgHandle.AddrReplace(link, addr)
+}
+
+// AddrReplace will replace (or, if not present, add) an IP address on a link device.
+// Equivalent to: `ip addr replace $addr dev $link`
+func (h *Handle) AddrReplace(link Link, addr *Addr) error {
+	req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE|syscall.NLM_F_ACK)
+	return h.addrHandle(link, addr, req)
+}
+
 // AddrDel will delete an IP address from a link device.
 // Equivalent to: `ip addr del $addr dev $link`
 func AddrDel(link Link, addr *Addr) error {
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
new file mode 100644
index 0000000..20df903
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
@@ -0,0 +1,344 @@
+package netlink
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+// ConntrackTableType Conntrack table for the netlink operation
+type ConntrackTableType uint8
+
+const (
+	// ConntrackTable Conntrack table
+	// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK		 1
+	ConntrackTable = 1
+	// ConntrackExpectTable Conntrack expect table
+	// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2
+	ConntrackExpectTable = 2
+)
+
+const (
+	// backward compatibility with golang 1.6 which does not have io.SeekCurrent
+	seekCurrent = 1
+)
+
+// InetFamily Family type
+type InetFamily uint8
+
+//  -L [table] [options]          List conntrack or expectation table
+//  -G [table] parameters         Get conntrack or expectation
+
+//  -I [table] parameters         Create a conntrack or expectation
+//  -U [table] parameters         Update a conntrack
+//  -E [table] [options]          Show events
+
+//  -C [table]                    Show counter
+//  -S                            Show statistics
+
+// ConntrackTableList returns the flow list of a table of a specific family
+// conntrack -L [table] [options]          List conntrack or expectation table
+func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+	return pkgHandle.ConntrackTableList(table, family)
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table
+// conntrack -F [table]            Flush table
+// The flush operation applies to all the family types
+func ConntrackTableFlush(table ConntrackTableType) error {
+	return pkgHandle.ConntrackTableFlush(table)
+}
+
+// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
+// conntrack -D [table] parameters         Delete conntrack or expectation
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+	return pkgHandle.ConntrackDeleteFilter(table, family, filter)
+}
+
+// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
+// conntrack -L [table] [options]          List conntrack or expectation table
+func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+	res, err := h.dumpConntrackTable(table, family)
+	if err != nil {
+		return nil, err
+	}
+
+	// Deserialize all the flows
+	var result []*ConntrackFlow
+	for _, dataRaw := range res {
+		result = append(result, parseRawData(dataRaw))
+	}
+
+	return result, nil
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed
+// conntrack -F [table]            Flush table
+// The flush operation applies to all the family types
+func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
+	req := h.newConntrackRequest(table, syscall.AF_INET, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK)
+	_, err := req.Execute(syscall.NETLINK_NETFILTER, 0)
+	return err
+}
+
+// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed
+// conntrack -D [table] parameters         Delete conntrack or expectation
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+	res, err := h.dumpConntrackTable(table, family)
+	if err != nil {
+		return 0, err
+	}
+
+	var matched uint
+	for _, dataRaw := range res {
+		flow := parseRawData(dataRaw)
+		if match := filter.MatchConntrackFlow(flow); match {
+			req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK)
+			// skip the first 4 bytes, which are the netfilter header; newConntrackRequest already adds it
+			req2.AddRawData(dataRaw[4:])
+			req2.Execute(syscall.NETLINK_NETFILTER, 0)
+			matched++
+		}
+	}
+
+	return matched, nil
+}
+
+func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily, operation, flags int) *nl.NetlinkRequest {
+	// Create the Netlink request object
+	req := h.newNetlinkRequest((int(table)<<8)|operation, flags)
+	// Add the netfilter header
+	msg := &nl.Nfgenmsg{
+		NfgenFamily: uint8(family),
+		Version:     nl.NFNETLINK_V0,
+		ResId:       0,
+	}
+	req.AddData(msg)
+	return req
+}
+
+func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) ([][]byte, error) {
+	req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, syscall.NLM_F_DUMP)
+	return req.Execute(syscall.NETLINK_NETFILTER, 0)
+}
+
+// The full conntrack flow structure is very complicated and can be found in the file:
+// http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h
+// For the time being, the structure below allows parsing and extracting the basic information of a flow
+type ipTuple struct {
+	SrcIP    net.IP
+	DstIP    net.IP
+	Protocol uint8
+	SrcPort  uint16
+	DstPort  uint16
+}
+
+type ConntrackFlow struct {
+	FamilyType uint8
+	Forward    ipTuple
+	Reverse    ipTuple
+}
+
+func (s *ConntrackFlow) String() string {
+	// conntrack cmd output:
+	// udp      17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001
+	return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d",
+		nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol,
+		s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort,
+		s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort)
+}
+
+// This method parses the IP tuple structure
+// The message structure is the following:
+// <len, [CTA_IP_V4_SRC|CTA_IP_V6_SRC], 16 bytes for the IP>
+// <len, [CTA_IP_V4_DST|CTA_IP_V6_DST], 16 bytes for the IP>
+// <len, NLA_F_NESTED|nl.CTA_TUPLE_PROTO, 1 byte for the protocol, 3 bytes of padding>
+// <len, CTA_PROTO_SRC_PORT, 2 bytes for the source port, 2 bytes of padding>
+// <len, CTA_PROTO_DST_PORT, 2 bytes for the destination port, 2 bytes of padding>
+func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) {
+	for i := 0; i < 2; i++ {
+		_, t, _, v := parseNfAttrTLV(reader)
+		switch t {
+		case nl.CTA_IP_V4_SRC, nl.CTA_IP_V6_SRC:
+			tpl.SrcIP = v
+		case nl.CTA_IP_V4_DST, nl.CTA_IP_V6_DST:
+			tpl.DstIP = v
+		}
+	}
+	// Skip the next 4 bytes: nl.NLA_F_NESTED|nl.CTA_TUPLE_PROTO
+	reader.Seek(4, seekCurrent)
+	_, t, _, v := parseNfAttrTLV(reader)
+	if t == nl.CTA_PROTO_NUM {
+		tpl.Protocol = uint8(v[0])
+	}
+	// Skip 3 bytes of padding
+	reader.Seek(3, seekCurrent)
+	for i := 0; i < 2; i++ {
+		_, t, _ := parseNfAttrTL(reader)
+		switch t {
+		case nl.CTA_PROTO_SRC_PORT:
+			parseBERaw16(reader, &tpl.SrcPort)
+		case nl.CTA_PROTO_DST_PORT:
+			parseBERaw16(reader, &tpl.DstPort)
+		}
+		// Skip 2 bytes of padding
+		reader.Seek(2, seekCurrent)
+	}
+}
+
+func parseNfAttrTLV(r *bytes.Reader) (isNested bool, attrType, len uint16, value []byte) {
+	isNested, attrType, len = parseNfAttrTL(r)
+
+	value = make([]byte, len)
+	binary.Read(r, binary.BigEndian, &value)
+	return isNested, attrType, len, value
+}
+
+func parseNfAttrTL(r *bytes.Reader) (isNested bool, attrType, len uint16) {
+	binary.Read(r, nl.NativeEndian(), &len)
+	len -= nl.SizeofNfattr
+
+	binary.Read(r, nl.NativeEndian(), &attrType)
+	isNested = (attrType & nl.NLA_F_NESTED) == nl.NLA_F_NESTED
+	attrType = attrType & (nl.NLA_F_NESTED - 1)
+
+	return isNested, attrType, len
+}
+
+func parseBERaw16(r *bytes.Reader, v *uint16) {
+	binary.Read(r, binary.BigEndian, v)
+}
+
+func parseRawData(data []byte) *ConntrackFlow {
+	s := &ConntrackFlow{}
+	// First there is the Nfgenmsg header
+	// consume only the family field
+	reader := bytes.NewReader(data)
+	binary.Read(reader, nl.NativeEndian(), &s.FamilyType)
+
+	// skip rest of the Netfilter header
+	reader.Seek(3, seekCurrent)
+	// The message structure is the following:
+	// <len, NLA_F_NESTED|CTA_TUPLE_ORIG> 4 bytes
+	// <len, NLA_F_NESTED|CTA_TUPLE_IP> 4 bytes
+	// flow information of the forward flow
+	// <len, NLA_F_NESTED|CTA_TUPLE_REPLY> 4 bytes
+	// <len, NLA_F_NESTED|CTA_TUPLE_IP> 4 bytes
+	// flow information of the reverse flow
+	for reader.Len() > 0 {
+		nested, t, l := parseNfAttrTL(reader)
+		if nested && t == nl.CTA_TUPLE_ORIG {
+			if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
+				parseIpTuple(reader, &s.Forward)
+			}
+		} else if nested && t == nl.CTA_TUPLE_REPLY {
+			if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
+				parseIpTuple(reader, &s.Reverse)
+
+				// Got all the useful information, stop parsing
+				break
+			} else {
+				// Header not recognized, skip it
+				reader.Seek(int64(l), seekCurrent)
+			}
+		}
+	}
+
+	return s
+}
+
+// Conntrack parameters and options:
+//   -n, --src-nat ip                      source NAT ip
+//   -g, --dst-nat ip                      destination NAT ip
+//   -j, --any-nat ip                      source or destination NAT ip
+//   -m, --mark mark                       Set mark
+//   -c, --secmark secmark                 Set selinux secmark
+//   -e, --event-mask eventmask            Event mask, eg. NEW,DESTROY
+//   -z, --zero                            Zero counters while listing
+//   -o, --output type[,...]               Output format, eg. xml
+//   -l, --label label[,...]               conntrack labels
+
+// Common parameters and options:
+//   -s, --src, --orig-src ip              Source address from original direction
+//   -d, --dst, --orig-dst ip              Destination address from original direction
+//   -r, --reply-src ip            Source address from reply direction
+//   -q, --reply-dst ip            Destination address from reply direction
+//   -p, --protonum proto          Layer 4 Protocol, eg. 'tcp'
+//   -f, --family proto            Layer 3 Protocol, eg. 'ipv6'
+//   -t, --timeout timeout         Set timeout
+//   -u, --status status           Set status, eg. ASSURED
+//   -w, --zone value              Set conntrack zone
+//   --orig-zone value             Set zone for original direction
+//   --reply-zone value            Set zone for reply direction
+//   -b, --buffer-size             Netlink socket buffer size
+//   --mask-src ip                 Source mask address
+//   --mask-dst ip                 Destination mask address
+
+// Filter types
+type ConntrackFilterType uint8
+
+const (
+	ConntrackOrigSrcIP = iota // -orig-src ip   Source address from original direction
+	ConntrackOrigDstIP        // -orig-dst ip   Destination address from original direction
+	ConntrackNatSrcIP         // -src-nat ip    Source NAT ip
+	ConntrackNatDstIP         // -dst-nat ip    Destination NAT ip
+	ConntrackNatAnyIP         // -any-nat ip    Source or destination NAT ip
+)
+
+type ConntrackFilter struct {
+	ipFilter map[ConntrackFilterType]net.IP
+}
+
+// AddIP adds an IP to the conntrack filter
+func (f *ConntrackFilter) AddIP(tp ConntrackFilterType, ip net.IP) error {
+	if f.ipFilter == nil {
+		f.ipFilter = make(map[ConntrackFilterType]net.IP)
+	}
+	if _, ok := f.ipFilter[tp]; ok {
+		return errors.New("Filter attribute already present")
+	}
+	f.ipFilter[tp] = ip
+	return nil
+}
+
+// MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter
+// false otherwise
+func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool {
+	if len(f.ipFilter) == 0 {
+		// an empty filter never matches
+		return false
+	}
+
+	match := true
+	// -orig-src ip   Source address from original direction
+	if elem, found := f.ipFilter[ConntrackOrigSrcIP]; found {
+		match = match && elem.Equal(flow.Forward.SrcIP)
+	}
+
+	// -orig-dst ip   Destination address from original direction
+	if elem, found := f.ipFilter[ConntrackOrigDstIP]; match && found {
+		match = match && elem.Equal(flow.Forward.DstIP)
+	}
+
+	// -src-nat ip    Source NAT ip
+	if elem, found := f.ipFilter[ConntrackNatSrcIP]; match && found {
+		match = match && elem.Equal(flow.Reverse.SrcIP)
+	}
+
+	// -dst-nat ip    Destination NAT ip
+	if elem, found := f.ipFilter[ConntrackNatDstIP]; match && found {
+		match = match && elem.Equal(flow.Reverse.DstIP)
+	}
+
+	// -any-nat ip    Source or destination NAT ip
+	if elem, found := f.ipFilter[ConntrackNatAnyIP]; match && found {
+		match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP))
+	}
+
+	return match
+}
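
Putting the new vendored API together, deleting every flow that was NATed to or from a given address looks roughly like the sketch below (the IP is illustrative only):

    package main

    import (
        "fmt"
        "net"
        "syscall"

        "github.com/vishvananda/netlink"
    )

    func main() {
        // Match any flow whose NAT side involves 192.168.10.2 (illustrative IP).
        filter := &netlink.ConntrackFilter{}
        if err := filter.AddIP(netlink.ConntrackNatAnyIP, net.ParseIP("192.168.10.2")); err != nil {
            panic(err)
        }
        deleted, err := netlink.ConntrackDeleteFilter(netlink.ConntrackTable, syscall.AF_INET, filter)
        if err != nil {
            panic(err)
        }
        fmt.Printf("deleted %d conntrack flows\n", deleted)
    }
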
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
new file mode 100644
index 0000000..af7af79
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
@@ -0,0 +1,53 @@
+// +build !linux
+
+package netlink
+
+// ConntrackTableType Conntrack table for the netlink operation
+type ConntrackTableType uint8
+
+// InetFamily Family type
+type InetFamily uint8
+
+// ConntrackFlow placeholder
+type ConntrackFlow struct{}
+
+// ConntrackFilter placeholder
+type ConntrackFilter struct{}
+
+// ConntrackTableList returns the flow list of a table of a specific family
+// conntrack -L [table] [options]          List conntrack or expectation table
+func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+	return nil, ErrNotImplemented
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table
+// conntrack -F [table]            Flush table
+// The flush operation applies to all the family types
+func ConntrackTableFlush(table ConntrackTableType) error {
+	return ErrNotImplemented
+}
+
+// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
+// conntrack -D [table] parameters         Delete conntrack or expectation
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+	return 0, ErrNotImplemented
+}
+
+// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
+// conntrack -L [table] [options]          List conntrack or expectation table
+func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+	return nil, ErrNotImplemented
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed
+// conntrack -F [table]            Flush table
+// The flush operation applies to all the family types
+func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
+	return ErrNotImplemented
+}
+
+// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed
+// conntrack -D [table] parameters         Delete conntrack or expectation
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+	return 0, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
index bc8a1e9..938b28b 100644
--- a/vendor/github.com/vishvananda/netlink/filter.go
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -1,6 +1,10 @@
 package netlink
 
-import "fmt"
+import (
+	"fmt"
+
+	"github.com/vishvananda/netlink/nl"
+)
 
 type Filter interface {
 	Attrs() *FilterAttrs
@@ -180,11 +184,46 @@
 	}
 }
 
+// Constants used in TcU32Sel.Flags.
+const (
+	TC_U32_TERMINAL  = nl.TC_U32_TERMINAL
+	TC_U32_OFFSET    = nl.TC_U32_OFFSET
+	TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET
+	TC_U32_EAT       = nl.TC_U32_EAT
+)
+
+// TcU32Sel is the Sel of the U32 filters and contains multiple TcU32Key entries. It is
+// the frontend copy of nl.TcU32Sel and is serialized into the canonical
+// nl.TcU32Sel with the appropriate endianness.
+type TcU32Sel struct {
+	Flags    uint8
+	Offshift uint8
+	Nkeys    uint8
+	Pad      uint8
+	Offmask  uint16
+	Off      uint16
+	Offoff   int16
+	Hoff     int16
+	Hmask    uint32
+	Keys     []TcU32Key
+}
+
+// TcU32Key is contained in the Sel of the U32 filters. It is the frontend copy of
+// nl.TcU32Key and is serialized into the canonical nl.TcU32Sel
+// with the appropriate endianness.
+type TcU32Key struct {
+	Mask    uint32
+	Val     uint32
+	Off     int32
+	OffMask int32
+}
+
 // U32 filters on many packet related properties
 type U32 struct {
 	FilterAttrs
 	ClassId    uint32
 	RedirIndex int
+	Sel        *TcU32Sel
 	Actions    []Action
 }
 
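
With the new Sel field, a caller can specify the u32 selector explicitly instead of relying on the implicit match-all; a minimal sketch that reproduces the match-all default and attaches it under an existing qdisc (the link index, handles, and priority are placeholders, not taken from this patch):

    package main

    import (
        "syscall"

        "github.com/vishvananda/netlink"
    )

    func main() {
        // Explicit equivalent of the previous implicit "match all" selector; real
        // callers would append meaningful TcU32Key matches to Keys.
        sel := &netlink.TcU32Sel{
            Nkeys: 1,
            Flags: netlink.TC_U32_TERMINAL,
            Keys:  []netlink.TcU32Key{{}},
        }
        filter := &netlink.U32{
            FilterAttrs: netlink.FilterAttrs{
                LinkIndex: 2, // placeholder interface index
                Parent:    netlink.MakeHandle(1, 0),
                Priority:  1,
                Protocol:  syscall.ETH_P_ALL,
            },
            ClassId: netlink.MakeHandle(1, 1),
            Sel:     sel,
        }
        if err := netlink.FilterAdd(filter); err != nil {
            panic(err)
        }
    }
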
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
index d9aedca..eb1802c 100644
--- a/vendor/github.com/vishvananda/netlink/filter_linux.go
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -6,6 +6,7 @@
 	"errors"
 	"fmt"
 	"syscall"
+	"unsafe"
 
 	"github.com/vishvananda/netlink/nl"
 )
@@ -128,12 +129,34 @@
 
 	options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
 	if u32, ok := filter.(*U32); ok {
-		// match all
-		sel := nl.TcU32Sel{
-			Nkeys: 1,
-			Flags: nl.TC_U32_TERMINAL,
+		// Convert TcU32Sel into nl.TcU32Sel as-is, without copying.
+		sel := (*nl.TcU32Sel)(unsafe.Pointer(u32.Sel))
+		if sel == nil {
+			// match all
+			sel = &nl.TcU32Sel{
+				Nkeys: 1,
+				Flags: nl.TC_U32_TERMINAL,
+			}
+			sel.Keys = append(sel.Keys, nl.TcU32Key{})
 		}
-		sel.Keys = append(sel.Keys, nl.TcU32Key{})
+
+		if native != networkOrder {
+			// Copy TcU32Sel.
+			cSel := sel
+			keys := make([]nl.TcU32Key, cap(sel.Keys))
+			copy(keys, sel.Keys)
+			cSel.Keys = keys
+			sel = cSel
+
+			// Handle the endianness of attributes
+			sel.Offmask = native.Uint16(htons(sel.Offmask))
+			sel.Hmask = native.Uint32(htonl(sel.Hmask))
+			for _, key := range sel.Keys {
+				key.Mask = native.Uint32(htonl(key.Mask))
+				key.Val = native.Uint32(htonl(key.Val))
+			}
+		}
+		sel.Nkeys = uint8(len(sel.Keys))
 		nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize())
 		if u32.ClassId != 0 {
 			nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(u32.ClassId))
@@ -425,6 +448,16 @@
 		case nl.TCA_U32_SEL:
 			detailed = true
 			sel := nl.DeserializeTcU32Sel(datum.Value)
+			u32.Sel = (*TcU32Sel)(unsafe.Pointer(sel))
+			if native != networkOrder {
+				// Handle the endianness of attributes
+				u32.Sel.Offmask = native.Uint16(htons(sel.Offmask))
+				u32.Sel.Hmask = native.Uint32(htonl(sel.Hmask))
+				for _, key := range u32.Sel.Keys {
+					key.Mask = native.Uint32(htonl(key.Mask))
+					key.Val = native.Uint32(htonl(key.Val))
+				}
+			}
 			// only parse if we have a very basic redirect
 			if sel.Flags&nl.TC_U32_TERMINAL == 0 || sel.Nkeys != 1 {
 				return detailed, nil
@@ -443,6 +476,8 @@
 					u32.RedirIndex = int(action.Ifindex)
 				}
 			}
+		case nl.TCA_U32_CLASSID:
+			u32.ClassId = native.Uint32(datum.Value)
 		}
 	}
 	return detailed, nil
diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
new file mode 100644
index 0000000..6692b53
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
@@ -0,0 +1,189 @@
+package nl
+
+import "unsafe"
+
+// Track the message sizes for the correct serialization/deserialization
+const (
+	SizeofNfgenmsg      = 4
+	SizeofNfattr        = 4
+	SizeofNfConntrack   = 376
+	SizeofNfctTupleHead = 52
+)
+
+var L4ProtoMap = map[uint8]string{
+	6:  "tcp",
+	17: "udp",
+}
+
+// All the following constants are coming from:
+// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+
+// enum cntl_msg_types {
+// 	IPCTNL_MSG_CT_NEW,
+// 	IPCTNL_MSG_CT_GET,
+// 	IPCTNL_MSG_CT_DELETE,
+// 	IPCTNL_MSG_CT_GET_CTRZERO,
+// 	IPCTNL_MSG_CT_GET_STATS_CPU,
+// 	IPCTNL_MSG_CT_GET_STATS,
+// 	IPCTNL_MSG_CT_GET_DYING,
+// 	IPCTNL_MSG_CT_GET_UNCONFIRMED,
+//
+// 	IPCTNL_MSG_MAX
+// };
+const (
+	IPCTNL_MSG_CT_GET    = 1
+	IPCTNL_MSG_CT_DELETE = 2
+)
+
+// #define NFNETLINK_V0	0
+const (
+	NFNETLINK_V0 = 0
+)
+
+// #define NLA_F_NESTED (1 << 15)
+const (
+	NLA_F_NESTED = (1 << 15)
+)
+
+// enum ctattr_type {
+// 	CTA_UNSPEC,
+// 	CTA_TUPLE_ORIG,
+// 	CTA_TUPLE_REPLY,
+// 	CTA_STATUS,
+// 	CTA_PROTOINFO,
+// 	CTA_HELP,
+// 	CTA_NAT_SRC,
+// #define CTA_NAT	CTA_NAT_SRC	/* backwards compatibility */
+// 	CTA_TIMEOUT,
+// 	CTA_MARK,
+// 	CTA_COUNTERS_ORIG,
+// 	CTA_COUNTERS_REPLY,
+// 	CTA_USE,
+// 	CTA_ID,
+// 	CTA_NAT_DST,
+// 	CTA_TUPLE_MASTER,
+// 	CTA_SEQ_ADJ_ORIG,
+// 	CTA_NAT_SEQ_ADJ_ORIG	= CTA_SEQ_ADJ_ORIG,
+// 	CTA_SEQ_ADJ_REPLY,
+// 	CTA_NAT_SEQ_ADJ_REPLY	= CTA_SEQ_ADJ_REPLY,
+// 	CTA_SECMARK,		/* obsolete */
+// 	CTA_ZONE,
+// 	CTA_SECCTX,
+// 	CTA_TIMESTAMP,
+// 	CTA_MARK_MASK,
+// 	CTA_LABELS,
+// 	CTA_LABELS_MASK,
+// 	__CTA_MAX
+// };
+const (
+	CTA_TUPLE_ORIG  = 1
+	CTA_TUPLE_REPLY = 2
+	CTA_STATUS      = 3
+	CTA_TIMEOUT     = 8
+	CTA_MARK        = 9
+	CTA_PROTOINFO   = 4
+)
+
+// enum ctattr_tuple {
+// 	CTA_TUPLE_UNSPEC,
+// 	CTA_TUPLE_IP,
+// 	CTA_TUPLE_PROTO,
+// 	CTA_TUPLE_ZONE,
+// 	__CTA_TUPLE_MAX
+// };
+// #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
+const (
+	CTA_TUPLE_IP    = 1
+	CTA_TUPLE_PROTO = 2
+)
+
+// enum ctattr_ip {
+// 	CTA_IP_UNSPEC,
+// 	CTA_IP_V4_SRC,
+// 	CTA_IP_V4_DST,
+// 	CTA_IP_V6_SRC,
+// 	CTA_IP_V6_DST,
+// 	__CTA_IP_MAX
+// };
+// #define CTA_IP_MAX (__CTA_IP_MAX - 1)
+const (
+	CTA_IP_V4_SRC = 1
+	CTA_IP_V4_DST = 2
+	CTA_IP_V6_SRC = 3
+	CTA_IP_V6_DST = 4
+)
+
+// enum ctattr_l4proto {
+// 	CTA_PROTO_UNSPEC,
+// 	CTA_PROTO_NUM,
+// 	CTA_PROTO_SRC_PORT,
+// 	CTA_PROTO_DST_PORT,
+// 	CTA_PROTO_ICMP_ID,
+// 	CTA_PROTO_ICMP_TYPE,
+// 	CTA_PROTO_ICMP_CODE,
+// 	CTA_PROTO_ICMPV6_ID,
+// 	CTA_PROTO_ICMPV6_TYPE,
+// 	CTA_PROTO_ICMPV6_CODE,
+// 	__CTA_PROTO_MAX
+// };
+// #define CTA_PROTO_MAX (__CTA_PROTO_MAX - 1)
+const (
+	CTA_PROTO_NUM      = 1
+	CTA_PROTO_SRC_PORT = 2
+	CTA_PROTO_DST_PORT = 3
+)
+
+// enum ctattr_protoinfo {
+// 	CTA_PROTOINFO_UNSPEC,
+// 	CTA_PROTOINFO_TCP,
+// 	CTA_PROTOINFO_DCCP,
+// 	CTA_PROTOINFO_SCTP,
+// 	__CTA_PROTOINFO_MAX
+// };
+// #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)
+const (
+	CTA_PROTOINFO_TCP = 1
+)
+
+// enum ctattr_protoinfo_tcp {
+// 	CTA_PROTOINFO_TCP_UNSPEC,
+// 	CTA_PROTOINFO_TCP_STATE,
+// 	CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
+// 	CTA_PROTOINFO_TCP_WSCALE_REPLY,
+// 	CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
+// 	CTA_PROTOINFO_TCP_FLAGS_REPLY,
+// 	__CTA_PROTOINFO_TCP_MAX
+// };
+// #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1)
+const (
+	CTA_PROTOINFO_TCP_STATE           = 1
+	CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2
+	CTA_PROTOINFO_TCP_WSCALE_REPLY    = 3
+	CTA_PROTOINFO_TCP_FLAGS_ORIGINAL  = 4
+	CTA_PROTOINFO_TCP_FLAGS_REPLY     = 5
+)
+
+// /* General form of address family dependent message.
+//  */
+// struct nfgenmsg {
+// 	__u8  nfgen_family;		/* AF_xxx */
+// 	__u8  version;		/* nfnetlink version */
+// 	__be16    res_id;		/* resource id */
+// };
+type Nfgenmsg struct {
+	NfgenFamily uint8
+	Version     uint8
+	ResId       uint16 // big endian
+}
+
+func (msg *Nfgenmsg) Len() int {
+	return SizeofNfgenmsg
+}
+
+func DeserializeNfgenmsg(b []byte) *Nfgenmsg {
+	return (*Nfgenmsg)(unsafe.Pointer(&b[0:SizeofNfgenmsg][0]))
+}
+
+func (msg *Nfgenmsg) Serialize() []byte {
+	return (*(*[SizeofNfgenmsg]byte)(unsafe.Pointer(msg)))[:]
+}
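
The Nfgenmsg type above is the 4-byte netfilter prefix that newConntrackRequest in conntrack_linux.go puts in front of every conntrack message; a tiny sketch of what it serializes to:

    package main

    import (
        "fmt"
        "syscall"

        "github.com/vishvananda/netlink/nl"
    )

    func main() {
        // The same values newConntrackRequest uses for an IPv4 conntrack request.
        msg := &nl.Nfgenmsg{
            NfgenFamily: uint8(syscall.AF_INET),
            Version:     nl.NFNETLINK_V0,
            ResId:       0,
        }
        fmt.Printf("% x\n", msg.Serialize()) // prints the 4-byte header: family, version, res_id
    }
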
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
index 2612c21..5820e84 100644
--- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -24,7 +24,7 @@
 )
 
 // SupportedNlFamilies contains the list of netlink families this netlink package supports
-var SupportedNlFamilies = []int{syscall.NETLINK_ROUTE, syscall.NETLINK_XFRM}
+var SupportedNlFamilies = []int{syscall.NETLINK_ROUTE, syscall.NETLINK_XFRM, syscall.NETLINK_NETFILTER}
 
 var nextSeqNr uint32
 
@@ -321,6 +321,7 @@
 type NetlinkRequest struct {
 	syscall.NlMsghdr
 	Data    []NetlinkRequestData
+	RawData []byte
 	Sockets map[int]*SocketHandle
 }
 
@@ -332,6 +333,8 @@
 		dataBytes[i] = data.Serialize()
 		length = length + len(dataBytes[i])
 	}
+	length += len(req.RawData)
+
 	req.Len = uint32(length)
 	b := make([]byte, length)
 	hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:]
@@ -343,6 +346,10 @@
 			next = next + 1
 		}
 	}
+	// Add the raw data if any
+	if len(req.RawData) > 0 {
+		copy(b[next:length], req.RawData)
+	}
 	return b
 }
 
@@ -352,6 +359,13 @@
 	}
 }
 
+// AddRawData adds raw bytes to the end of the NetlinkRequest object during serialization
+func (req *NetlinkRequest) AddRawData(data []byte) {
+	if data != nil {
+		req.RawData = append(req.RawData, data...)
+	}
+}
+
 // Execute the request against a the given sockType.
 // Returns a list of netlink messages in serialized format, optionally filtered
 // by resType.