Merge pull request #296 from thaJeztah/19.03_backport_exec_hang

[19.03 backport] Handle blocked I/O of exec'd processes
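
The changes below do two things. First, the "running container" predicate in
`ImageDelete` now compares `c.ImageID` before calling `c.IsRunning()`: the
field comparison is lock-free, while `IsRunning()` takes the container state
lock and can block when a container's I/O is wedged, so short-circuiting on
the image ID means the lock is only taken for containers that actually use
the image being deleted. Second, the integration-test helpers
(`container.Create`, `container.Run`, `network.CreateNoError`) and all of
their callers now take `context.Context` as the first parameter, following Go
convention and making the `nolint: golint` suppressions unnecessary.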
diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go
index 94d6f87..fbd6c16 100644
--- a/daemon/images/image_delete.go
+++ b/daemon/images/image_delete.go
@@ -369,7 +369,7 @@
 	if mask&conflictRunningContainer != 0 {
 		// Check if any running container is using the image.
 		running := func(c *container.Container) bool {
-			return c.IsRunning() && c.ImageID == imgID
+			return c.ImageID == imgID && c.IsRunning()
 		}
 		if container := i.containers.First(running); container != nil {
 			return &imageDeleteConflict{
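
For context, a simplified sketch of the lock-guarded state check that makes
the reorder above worthwhile. This is illustrative, assuming the container
package's usual State pattern, not the exact 19.03 source:

    // IsRunning reports whether the container is running. It acquires the
    // state mutex, so it can block for as long as another goroutine holds
    // the lock (for example, while the container is wedged on blocked I/O).
    // The lock-free ImageID comparison above short-circuits the predicate,
    // so this is only called for containers that use the image being deleted.
    func (s *State) IsRunning() bool {
        s.Lock()
        defer s.Unlock()
        return s.Running
    }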
diff --git a/integration/build/build_squash_test.go b/integration/build/build_squash_test.go
index 2d9ed10..4398ebc 100644
--- a/integration/build/build_squash_test.go
+++ b/integration/build/build_squash_test.go
@@ -79,7 +79,7 @@
 	resp.Body.Close()
 	assert.NilError(t, err)
 
-	cid := container.Run(t, ctx, client,
+	cid := container.Run(ctx, t, client,
 		container.WithImage(name),
 		container.WithCmd("/bin/sh", "-c", "cat /hello"),
 	)
@@ -94,11 +94,11 @@
 	assert.NilError(t, err)
 	assert.Check(t, is.Equal(strings.TrimSpace(actualStdout.String()), "hello\nworld"))
 
-	container.Run(t, ctx, client,
+	container.Run(ctx, t, client,
 		container.WithImage(name),
 		container.WithCmd("/bin/sh", "-c", "[ ! -f /remove_me ]"),
 	)
-	container.Run(t, ctx, client,
+	container.Run(ctx, t, client,
 		container.WithImage(name),
 		container.WithCmd("/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`),
 	)
diff --git a/integration/container/checkpoint_test.go b/integration/container/checkpoint_test.go
index 079452e..7f7c74e 100644
--- a/integration/container/checkpoint_test.go
+++ b/integration/container/checkpoint_test.go
@@ -50,7 +50,7 @@
 	}
 
 	t.Log("Start a container")
-	cID := container.Run(t, ctx, client, container.WithMount(mnt))
+	cID := container.Run(ctx, t, client, container.WithMount(mnt))
 	poll.WaitOn(t,
 		container.IsInState(ctx, client, cID, "running"),
 		poll.WithDelay(100*time.Millisecond),
diff --git a/integration/container/copy_test.go b/integration/container/copy_test.go
index 9020b80..0b6123a 100644
--- a/integration/container/copy_test.go
+++ b/integration/container/copy_test.go
@@ -26,7 +26,7 @@
 
 	ctx := context.Background()
 	apiclient := testEnv.APIClient()
-	cid := container.Create(t, ctx, apiclient)
+	cid := container.Create(ctx, t, apiclient)
 
 	_, _, err := apiclient.CopyFromContainer(ctx, cid, "/dne")
 	assert.Check(t, client.IsErrNotFound(err))
@@ -40,7 +40,7 @@
 
 	ctx := context.Background()
 	apiclient := testEnv.APIClient()
-	cid := container.Create(t, ctx, apiclient)
+	cid := container.Create(ctx, t, apiclient)
 
 	_, _, err := apiclient.CopyFromContainer(ctx, cid, "/etc/passwd/")
 	assert.Assert(t, is.ErrorContains(err, "not a directory"))
@@ -52,7 +52,7 @@
 
 	ctx := context.Background()
 	apiclient := testEnv.APIClient()
-	cid := container.Create(t, ctx, apiclient)
+	cid := container.Create(ctx, t, apiclient)
 
 	err := apiclient.CopyToContainer(ctx, cid, "/dne", nil, types.CopyToContainerOptions{})
 	assert.Check(t, client.IsErrNotFound(err))
@@ -66,7 +66,7 @@
 
 	ctx := context.Background()
 	apiclient := testEnv.APIClient()
-	cid := container.Create(t, ctx, apiclient)
+	cid := container.Create(ctx, t, apiclient)
 
 	err := apiclient.CopyToContainer(ctx, cid, "/etc/passwd/", nil, types.CopyToContainerOptions{})
 	assert.Assert(t, is.ErrorContains(err, "not a directory"))
@@ -105,7 +105,7 @@
 	assert.NilError(t, err)
 	assert.Assert(t, imageID != "")
 
-	cid := container.Create(t, ctx, apiClient, container.WithImage(imageID))
+	cid := container.Create(ctx, t, apiClient, container.WithImage(imageID))
 
 	for _, x := range []struct {
 		src    string
diff --git a/integration/container/daemon_linux_test.go b/integration/container/daemon_linux_test.go
index efb97c5..3278837 100644
--- a/integration/container/daemon_linux_test.go
+++ b/integration/container/daemon_linux_test.go
@@ -40,7 +40,7 @@
 
 	ctx := context.Background()
 
-	cID := container.Create(t, ctx, c)
+	cID := container.Create(ctx, t, c)
 	defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
 
 	err := c.ContainerStart(ctx, cID, types.ContainerStartOptions{})
@@ -94,7 +94,7 @@
 	ctx := context.Background()
 
 	// check the container is created with private ipc mode as per daemon default
-	cID := container.Run(t, ctx, c,
+	cID := container.Run(ctx, t, c,
 		container.WithCmd("top"),
 		container.WithRestartPolicy("always"),
 	)
@@ -113,7 +113,7 @@
 	assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private"))
 
 	// check a new container is created with shareable ipc mode as per new daemon default
-	cID = container.Run(t, ctx, c)
+	cID = container.Run(ctx, t, c)
 	defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
 
 	inspect, err = c.ContainerInspect(ctx, cID)
diff --git a/integration/container/diff_test.go b/integration/container/diff_test.go
index ac0bac9..32e6744 100644
--- a/integration/container/diff_test.go
+++ b/integration/container/diff_test.go
@@ -19,7 +19,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", `mkdir /foo; echo xyzzy > /foo/bar`))
+	cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", `mkdir /foo; echo xyzzy > /foo/bar`))
 
 	// Wait for it to exit as we cannot diff a running container on Windows, and
 	// it will take a few seconds to exit. Also there's no way in Windows to
diff --git a/integration/container/exec_test.go b/integration/container/exec_test.go
index c7603eb..392b5bb 100644
--- a/integration/container/exec_test.go
+++ b/integration/container/exec_test.go
@@ -24,7 +24,7 @@
 	client := testEnv.APIClient()
 
 	// run top with detached mode
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 
 	expected := "closeIO"
 	execResp, err := client.ContainerExecCreate(ctx, cID,
@@ -90,7 +90,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	cID := container.Run(t, ctx, client, container.WithTty(true), container.WithWorkingDir("/root"))
+	cID := container.Run(ctx, t, client, container.WithTty(true), container.WithWorkingDir("/root"))
 
 	id, err := client.ContainerExecCreate(ctx, cID,
 		types.ExecConfig{
@@ -125,7 +125,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	cID := container.Run(t, ctx, client, container.WithTty(true), container.WithUser("1:1"))
+	cID := container.Run(ctx, t, client, container.WithTty(true), container.WithUser("1:1"))
 
 	result, err := container.Exec(ctx, client, cID, []string{"id"})
 	assert.NilError(t, err)
diff --git a/integration/container/export_test.go b/integration/container/export_test.go
index 6727a67..98b5245 100644
--- a/integration/container/export_test.go
+++ b/integration/container/export_test.go
@@ -25,7 +25,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("true"))
+	cID := container.Run(ctx, t, client, container.WithCmd("true"))
 	poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond))
 
 	reference := "repo/testexp:v1"
@@ -67,7 +67,7 @@
 	defer d.Stop(t)
 
 	ctx := context.Background()
-	ctrID := container.Create(t, ctx, c)
+	ctrID := container.Create(ctx, t, c)
 
 	d.Restart(t)
 
diff --git a/integration/container/health_test.go b/integration/container/health_test.go
index 64284c8..2f90afd 100644
--- a/integration/container/health_test.go
+++ b/integration/container/health_test.go
@@ -22,7 +22,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	cID := container.Run(t, ctx, client, container.WithTty(true), container.WithWorkingDir("/foo"), func(c *container.TestContainerConfig) {
+	cID := container.Run(ctx, t, client, container.WithTty(true), container.WithWorkingDir("/foo"), func(c *container.TestContainerConfig) {
 		c.Config.Healthcheck = &containertypes.HealthConfig{
 			Test:     []string{"CMD-SHELL", "if [ \"$PWD\" = \"/foo\" ]; then exit 0; else exit 1; fi;"},
 			Interval: 50 * time.Millisecond,
@@ -42,7 +42,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	id := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
+	id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.Config.Healthcheck = &containertypes.HealthConfig{
 			Test:     []string{"CMD-SHELL", "sleep 1"},
 			Interval: time.Second,
diff --git a/integration/container/inspect_test.go b/integration/container/inspect_test.go
index 11f78b9..b865085 100644
--- a/integration/container/inspect_test.go
+++ b/integration/container/inspect_test.go
@@ -24,7 +24,7 @@
 
 	name := "cpusetinconfig-pre120-" + t.Name()
 	// Create container with up-to-date API
-	container.Run(t, ctx, request.NewAPIClient(t), container.WithName(name),
+	container.Run(ctx, t, request.NewAPIClient(t), container.WithName(name),
 		container.WithCmd("true"),
 		func(c *container.TestContainerConfig) {
 			c.HostConfig.Resources.CpusetCpus = "0"
diff --git a/integration/container/ipcmode_linux_test.go b/integration/container/ipcmode_linux_test.go
index 5b07c0e..2cb5f9b 100644
--- a/integration/container/ipcmode_linux_test.go
+++ b/integration/container/ipcmode_linux_test.go
@@ -308,7 +308,7 @@
 	ctx := context.Background()
 
 	// pre-check: default ipc mode in daemon is private
-	cID := container.Create(t, ctx, c, container.WithAutoRemove)
+	cID := container.Create(ctx, t, c, container.WithAutoRemove)
 
 	inspect, err := c.ContainerInspect(ctx, cID)
 	assert.NilError(t, err)
@@ -316,7 +316,7 @@
 
 	// main check: using older client creates "shareable" container
 	c = request.NewAPIClient(t, client.WithVersion("1.39"))
-	cID = container.Create(t, ctx, c, container.WithAutoRemove)
+	cID = container.Create(ctx, t, c, container.WithAutoRemove)
 
 	inspect, err = c.ContainerInspect(ctx, cID)
 	assert.NilError(t, err)
diff --git a/integration/container/kill_test.go b/integration/container/kill_test.go
index 60f4f17..b2c98a8 100644
--- a/integration/container/kill_test.go
+++ b/integration/container/kill_test.go
@@ -18,7 +18,7 @@
 	defer setupTest(t)()
 	client := testEnv.APIClient()
 	ctx := context.Background()
-	id := container.Run(t, ctx, client)
+	id := container.Run(ctx, t, client)
 
 	err := client.ContainerKill(ctx, id, "0")
 	assert.Error(t, err, "Error response from daemon: Invalid signal: 0")
@@ -60,7 +60,7 @@
 		tc := tc
 		t.Run(tc.doc, func(t *testing.T) {
 			ctx := context.Background()
-			id := container.Run(t, ctx, client)
+			id := container.Run(ctx, t, client)
 			err := client.ContainerKill(ctx, id, tc.signal)
 			assert.NilError(t, err)
 
@@ -95,7 +95,7 @@
 		tc := tc
 		t.Run(tc.doc, func(t *testing.T) {
 			ctx := context.Background()
-			id := container.Run(t, ctx, client,
+			id := container.Run(ctx, t, client,
 				container.WithRestartPolicy("always"),
 				func(c *container.TestContainerConfig) {
 					c.Config.StopSignal = tc.stopsignal
@@ -113,7 +113,7 @@
 	defer setupTest(t)()
 	ctx := context.Background()
 	client := testEnv.APIClient()
-	id := container.Create(t, ctx, client)
+	id := container.Create(ctx, t, client)
 	err := client.ContainerKill(ctx, id, "SIGKILL")
 	assert.Assert(t, is.ErrorContains(err, ""))
 	assert.Assert(t, is.Contains(err.Error(), "is not running"))
@@ -124,7 +124,7 @@
 	defer setupTest(t)()
 	ctx := context.Background()
 	client := request.NewAPIClient(t, client.WithVersion("1.19"))
-	id := container.Create(t, ctx, client)
+	id := container.Create(ctx, t, client)
 	err := client.ContainerKill(ctx, id, "SIGKILL")
 	assert.NilError(t, err)
 }
@@ -137,7 +137,7 @@
 	ctx := context.Background()
 	client := request.NewAPIClient(t, client.WithVersion("1.19"))
 
-	id := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
+	id := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.Config.User = "daemon"
 	})
 	poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond))
@@ -154,7 +154,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) {
+	cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) {
 		c.HostConfig.Resources.Memory = 32 * 1024 * 1024
 	})
 
@@ -172,7 +172,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "echo hello world"))
+	cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "echo hello world"))
 
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))
 
diff --git a/integration/container/links_linux_test.go b/integration/container/links_linux_test.go
index 9a3c9d2..e0fb9d6 100644
--- a/integration/container/links_linux_test.go
+++ b/integration/container/links_linux_test.go
@@ -24,7 +24,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, container.WithNetworkMode("host"))
+	cID := container.Run(ctx, t, client, container.WithNetworkMode("host"))
 	res, err := container.Exec(ctx, client, cID, []string{"cat", "/etc/hosts"})
 	assert.NilError(t, err)
 	assert.Assert(t, is.Len(res.Stderr(), 0))
@@ -42,8 +42,8 @@
 
 	containerA := "first_" + t.Name()
 	containerB := "second_" + t.Name()
-	container.Run(t, ctx, client, container.WithName(containerA))
-	container.Run(t, ctx, client, container.WithName(containerB), container.WithLinks(containerA+":"+containerA))
+	container.Run(ctx, t, client, container.WithName(containerA))
+	container.Run(ctx, t, client, container.WithName(containerB), container.WithLinks(containerA+":"+containerA))
 
 	f := filters.NewArgs(filters.Arg("name", containerA))
 
diff --git a/integration/container/logs_test.go b/integration/container/logs_test.go
index 0a3e831..79e13fa 100644
--- a/integration/container/logs_test.go
+++ b/integration/container/logs_test.go
@@ -21,7 +21,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	id := container.Run(t, ctx, client, container.WithCmd("sleep", "100000"))
+	id := container.Run(ctx, t, client, container.WithCmd("sleep", "100000"))
 
 	logs, err := client.ContainerLogs(ctx, id, types.ContainerLogsOptions{ShowStdout: true, Tail: "2"})
 	if logs != nil {
diff --git a/integration/container/mounts_linux_test.go b/integration/container/mounts_linux_test.go
index 0148f50..1ee5dd1 100644
--- a/integration/container/mounts_linux_test.go
+++ b/integration/container/mounts_linux_test.go
@@ -256,9 +256,9 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 	containers := []string{
-		container.Run(t, ctx, client, container.WithMount(implicit), container.WithCmd(recursiveVerifier...)),
-		container.Run(t, ctx, client, container.WithMount(recursive), container.WithCmd(recursiveVerifier...)),
-		container.Run(t, ctx, client, container.WithMount(nonRecursive), container.WithCmd(nonRecursiveVerifier...)),
+		container.Run(ctx, t, client, container.WithMount(implicit), container.WithCmd(recursiveVerifier...)),
+		container.Run(ctx, t, client, container.WithMount(recursive), container.WithCmd(recursiveVerifier...)),
+		container.Run(ctx, t, client, container.WithMount(nonRecursive), container.WithCmd(nonRecursiveVerifier...)),
 	}
 
 	for _, c := range containers {
diff --git a/integration/container/nat_test.go b/integration/container/nat_test.go
index 3e81446..8a19d78 100644
--- a/integration/container/nat_test.go
+++ b/integration/container/nat_test.go
@@ -71,7 +71,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:"+serverContainerID))
+	cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:"+serverContainerID))
 
 	poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond))
 
@@ -93,7 +93,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, container.WithName("server-"+t.Name()), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) {
+	cID := container.Run(ctx, t, client, container.WithName("server-"+t.Name()), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) {
 		c.HostConfig.PortBindings = nat.PortMap{
 			nat.Port(fmt.Sprintf("%d/tcp", port)): []nat.PortBinding{
 				{
diff --git a/integration/container/pause_test.go b/integration/container/pause_test.go
index 875841d..5e91ee3 100644
--- a/integration/container/pause_test.go
+++ b/integration/container/pause_test.go
@@ -25,7 +25,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
 	since := request.DaemonUnixTime(ctx, t, client, testEnv)
@@ -57,7 +57,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
 	err := client.ContainerPause(ctx, cID)
@@ -71,7 +71,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
 	err := client.ContainerPause(ctx, cID)
diff --git a/integration/container/ps_test.go b/integration/container/ps_test.go
index 93721bc..7c04ac3 100644
--- a/integration/container/ps_test.go
+++ b/integration/container/ps_test.go
@@ -16,9 +16,9 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	prev := container.Create(t, ctx, client)
-	top := container.Create(t, ctx, client)
-	next := container.Create(t, ctx, client)
+	prev := container.Create(ctx, t, client)
+	top := container.Create(ctx, t, client)
+	next := container.Create(ctx, t, client)
 
 	containerIDs := func(containers []types.Container) []string {
 		var entries []string
diff --git a/integration/container/remove_test.go b/integration/container/remove_test.go
index 027c920..0239a97 100644
--- a/integration/container/remove_test.go
+++ b/integration/container/remove_test.go
@@ -36,7 +36,7 @@
 	tempDir := fs.NewDir(t, "test-rm-container-with-removed-volume", fs.WithMode(0755))
 	defer tempDir.Remove()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("true"), container.WithBind(tempDir.Path(), prefix+slash+"test"))
+	cID := container.Run(ctx, t, client, container.WithCmd("true"), container.WithBind(tempDir.Path(), prefix+slash+"test"))
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))
 
 	err := os.RemoveAll(tempDir.Path())
@@ -59,7 +59,7 @@
 
 	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("true"), container.WithVolume(prefix+slash+"srv"))
+	cID := container.Run(ctx, t, client, container.WithCmd("true"), container.WithVolume(prefix+slash+"srv"))
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))
 
 	insp, _, err := client.ContainerInspectWithRaw(ctx, cID, true)
@@ -82,7 +82,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 
 	err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{})
 	assert.Check(t, is.ErrorContains(err, "cannot remove a running container"))
@@ -93,7 +93,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 
 	err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{
 		Force: true,
diff --git a/integration/container/rename_test.go b/integration/container/rename_test.go
index 976e600..fc94b33 100644
--- a/integration/container/rename_test.go
+++ b/integration/container/rename_test.go
@@ -30,18 +30,18 @@
 
 	aName := "a0" + t.Name()
 	bName := "b0" + t.Name()
-	aID := container.Run(t, ctx, client, container.WithName(aName))
-	bID := container.Run(t, ctx, client, container.WithName(bName), container.WithLinks(aName))
+	aID := container.Run(ctx, t, client, container.WithName(aName))
+	bID := container.Run(ctx, t, client, container.WithName(bName), container.WithLinks(aName))
 
 	err := client.ContainerRename(ctx, aID, "a1"+t.Name())
 	assert.NilError(t, err)
 
-	container.Run(t, ctx, client, container.WithName(aName))
+	container.Run(ctx, t, client, container.WithName(aName))
 
 	err = client.ContainerRemove(ctx, bID, types.ContainerRemoveOptions{Force: true})
 	assert.NilError(t, err)
 
-	bID = container.Run(t, ctx, client, container.WithName(bName), container.WithLinks(aName))
+	bID = container.Run(ctx, t, client, container.WithName(bName), container.WithLinks(aName))
 
 	inspect, err := client.ContainerInspect(ctx, bID)
 	assert.NilError(t, err)
@@ -54,7 +54,7 @@
 	client := testEnv.APIClient()
 
 	oldName := "first_name" + t.Name()
-	cID := container.Run(t, ctx, client, container.WithName(oldName), container.WithCmd("sh"))
+	cID := container.Run(ctx, t, client, container.WithName(oldName), container.WithCmd("sh"))
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))
 
 	inspect, err := client.ContainerInspect(ctx, cID)
@@ -76,7 +76,7 @@
 	client := testEnv.APIClient()
 
 	oldName := "first_name" + t.Name()
-	cID := container.Run(t, ctx, client, container.WithName(oldName))
+	cID := container.Run(ctx, t, client, container.WithName(oldName))
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
 	newName := "new_name" + stringid.GenerateRandomID()
@@ -90,7 +90,7 @@
 	_, err = client.ContainerInspect(ctx, oldName)
 	assert.Check(t, is.ErrorContains(err, "No such container: "+oldName))
 
-	cID = container.Run(t, ctx, client, container.WithName(oldName))
+	cID = container.Run(ctx, t, client, container.WithName(oldName))
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
 	inspect, err = client.ContainerInspect(ctx, cID)
@@ -104,7 +104,7 @@
 	client := testEnv.APIClient()
 
 	oldName := "first_name" + t.Name()
-	cID := container.Run(t, ctx, client, container.WithName(oldName))
+	cID := container.Run(ctx, t, client, container.WithName(oldName))
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
 	err := client.ContainerRename(ctx, oldName, "new:invalid")
@@ -132,7 +132,7 @@
 	_, err := client.NetworkCreate(ctx, networkName, types.NetworkCreate{})
 
 	assert.NilError(t, err)
-	cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
+	cID := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.NetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
 			networkName: {},
 		}
@@ -155,7 +155,7 @@
 	if testEnv.OSType == "windows" {
 		count = "-n"
 	}
-	cID = container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
+	cID = container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.NetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
 			networkName: {},
 		}
@@ -175,7 +175,7 @@
 	client := testEnv.APIClient()
 
 	oldName := "old" + t.Name()
-	cID := container.Run(t, ctx, client, container.WithName(oldName))
+	cID := container.Run(ctx, t, client, container.WithName(oldName))
 
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 	err := client.ContainerRename(ctx, oldName, oldName)
@@ -198,12 +198,12 @@
 	client := testEnv.APIClient()
 
 	db1Name := "db1" + t.Name()
-	db1ID := container.Run(t, ctx, client, container.WithName(db1Name))
+	db1ID := container.Run(ctx, t, client, container.WithName(db1Name))
 	poll.WaitOn(t, container.IsInState(ctx, client, db1ID, "running"), poll.WithDelay(100*time.Millisecond))
 
 	app1Name := "app1" + t.Name()
 	app2Name := "app2" + t.Name()
-	app1ID := container.Run(t, ctx, client, container.WithName(app1Name), container.WithLinks(db1Name+":/mysql"))
+	app1ID := container.Run(ctx, t, client, container.WithName(app1Name), container.WithLinks(db1Name+":/mysql"))
 	poll.WaitOn(t, container.IsInState(ctx, client, app1ID, "running"), poll.WithDelay(100*time.Millisecond))
 
 	err := client.ContainerRename(ctx, app1Name, app2Name)
diff --git a/integration/container/resize_test.go b/integration/container/resize_test.go
index 8d2ee7d..27b5be4 100644
--- a/integration/container/resize_test.go
+++ b/integration/container/resize_test.go
@@ -22,7 +22,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
@@ -40,7 +40,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
@@ -55,7 +55,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("echo"))
+	cID := container.Run(ctx, t, client, container.WithCmd("echo"))
 
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond))
 
diff --git a/integration/container/run_linux_test.go b/integration/container/run_linux_test.go
index ec088ec..6df19c7 100644
--- a/integration/container/run_linux_test.go
+++ b/integration/container/run_linux_test.go
@@ -29,7 +29,7 @@
 		kernelMemoryTCP int64 = 200 * 1024 * 1024
 	)
 
-	cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
+	cID := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.HostConfig.Resources = containertypes.Resources{
 			KernelMemoryTCP: kernelMemoryTCP,
 		}
@@ -65,7 +65,7 @@
 		domainname = "baz.cyphar.com"
 	)
 
-	cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
+	cID := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.Config.Hostname = hostname
 		c.Config.Domainname = domainname
 	})
diff --git a/integration/container/stats_test.go b/integration/container/stats_test.go
index 8472cc9..66ce5a2 100644
--- a/integration/container/stats_test.go
+++ b/integration/container/stats_test.go
@@ -25,7 +25,7 @@
 	info, err := client.Info(ctx)
 	assert.NilError(t, err)
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
diff --git a/integration/container/stop_linux_test.go b/integration/container/stop_linux_test.go
index 552706c..1aa80c9 100644
--- a/integration/container/stop_linux_test.go
+++ b/integration/container/stop_linux_test.go
@@ -54,7 +54,7 @@
 		d := d
 		t.Run(strconv.Itoa(d.timeout), func(t *testing.T) {
 			t.Parallel()
-			id := container.Run(t, ctx, client, testCmd)
+			id := container.Run(ctx, t, client, testCmd)
 
 			timeout := time.Duration(d.timeout) * time.Second
 			err := client.ContainerStop(ctx, id, &timeout)
@@ -78,7 +78,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	id := container.Run(t, ctx, client, container.WithName("foo-"+t.Name()), container.WithCmd("echo"))
+	id := container.Run(ctx, t, client, container.WithName("foo-"+t.Name()), container.WithCmd("echo"))
 
 	poll.WaitOn(t, container.IsStopped(ctx, client, id), poll.WithDelay(100*time.Millisecond))
 
diff --git a/integration/container/stop_test.go b/integration/container/stop_test.go
index 8a95f32..c88e1c9 100644
--- a/integration/container/stop_test.go
+++ b/integration/container/stop_test.go
@@ -17,7 +17,7 @@
 
 	names := []string{"verifyRestart1-" + t.Name(), "verifyRestart2-" + t.Name()}
 	for _, name := range names {
-		container.Run(t, ctx, client,
+		container.Run(ctx, t, client,
 			container.WithName(name),
 			container.WithCmd("false"),
 			container.WithRestartPolicy("always"),
diff --git a/integration/container/stop_windows_test.go b/integration/container/stop_windows_test.go
index 2dd5a93..ecda947 100644
--- a/integration/container/stop_windows_test.go
+++ b/integration/container/stop_windows_test.go
@@ -51,7 +51,7 @@
 		d := d
 		t.Run(strconv.Itoa(d.timeout), func(t *testing.T) {
 			t.Parallel()
-			id := container.Run(t, ctx, client, testCmd)
+			id := container.Run(ctx, t, client, testCmd)
 
 			timeout := time.Duration(d.timeout) * time.Second
 			err := client.ContainerStop(ctx, id, &timeout)
diff --git a/integration/container/update_linux_test.go b/integration/container/update_linux_test.go
index e4f4e2c..3a72aba 100644
--- a/integration/container/update_linux_test.go
+++ b/integration/container/update_linux_test.go
@@ -26,7 +26,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) {
+	cID := container.Run(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.HostConfig.Resources = containertypes.Resources{
 			Memory: 200 * 1024 * 1024,
 		}
@@ -72,7 +72,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 
 	for _, test := range []struct {
 		desc   string
@@ -140,7 +140,7 @@
 
 		t.Run(test.desc, func(t *testing.T) {
 			// Using "network=host" to speed up creation (13.96s vs 6.54s)
-			cID := container.Run(t, ctx, apiClient, container.WithPidsLimit(test.initial), container.WithNetworkMode("host"))
+			cID := container.Run(ctx, t, apiClient, container.WithPidsLimit(test.initial), container.WithNetworkMode("host"))
 
 			_, err := c.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{
 				Resources: containertypes.Resources{
diff --git a/integration/container/update_test.go b/integration/container/update_test.go
index 129f8ea..53b6480 100644
--- a/integration/container/update_test.go
+++ b/integration/container/update_test.go
@@ -17,7 +17,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "sleep 1 && false"), func(c *container.TestContainerConfig) {
+	cID := container.Run(ctx, t, client, container.WithCmd("sh", "-c", "sleep 1 && false"), func(c *container.TestContainerConfig) {
 		c.HostConfig.RestartPolicy = containertypes.RestartPolicy{
 			Name:              "on-failure",
 			MaximumRetryCount: 3,
@@ -50,7 +50,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, client, container.WithAutoRemove)
+	cID := container.Run(ctx, t, client, container.WithAutoRemove)
 
 	_, err := client.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{
 		RestartPolicy: containertypes.RestartPolicy{
diff --git a/integration/container/wait_test.go b/integration/container/wait_test.go
index 09edcab..53787d6 100644
--- a/integration/container/wait_test.go
+++ b/integration/container/wait_test.go
@@ -39,7 +39,7 @@
 		t.Run(tc.doc, func(t *testing.T) {
 			t.Parallel()
 			ctx := context.Background()
-			containerID := container.Run(t, ctx, cli, container.WithCmd("sh", "-c", tc.cmd))
+			containerID := container.Run(ctx, t, cli, container.WithCmd("sh", "-c", tc.cmd))
 			poll.WaitOn(t, container.IsInState(ctx, cli, containerID, "exited"), poll.WithTimeout(30*time.Second), poll.WithDelay(100*time.Millisecond))
 
 			waitresC, errC := cli.ContainerWait(ctx, containerID, "")
@@ -81,7 +81,7 @@
 		t.Run(tc.doc, func(t *testing.T) {
 			t.Parallel()
 			ctx := context.Background()
-			containerID := container.Run(t, ctx, cli, container.WithCmd("sh", "-c", tc.cmd))
+			containerID := container.Run(ctx, t, cli, container.WithCmd("sh", "-c", tc.cmd))
 			poll.WaitOn(t, container.IsInState(ctx, cli, containerID, "running"), poll.WithTimeout(30*time.Second), poll.WithDelay(100*time.Millisecond))
 
 			waitresC, errC := cli.ContainerWait(ctx, containerID, "")
diff --git a/integration/image/commit_test.go b/integration/image/commit_test.go
index 996f371..26850c5 100644
--- a/integration/image/commit_test.go
+++ b/integration/image/commit_test.go
@@ -19,7 +19,7 @@
 	client := testEnv.APIClient()
 	ctx := context.Background()
 
-	cID1 := container.Create(t, ctx, client)
+	cID1 := container.Create(ctx, t, client)
 
 	commitResp1, err := client.ContainerCommit(ctx, cID1, types.ContainerCommitOptions{
 		Changes:   []string{"ENV PATH=/bin"},
@@ -33,7 +33,7 @@
 	expectedEnv1 := []string{"PATH=/bin"}
 	assert.Check(t, is.DeepEqual(expectedEnv1, image1.Config.Env))
 
-	cID2 := container.Create(t, ctx, client, container.WithImage(image1.ID))
+	cID2 := container.Create(ctx, t, client, container.WithImage(image1.ID))
 
 	commitResp2, err := client.ContainerCommit(ctx, cID2, types.ContainerCommitOptions{
 		Changes:   []string{"ENV PATH=/usr/bin:$PATH"},
diff --git a/integration/image/remove_test.go b/integration/image/remove_test.go
index 68ef9aa..603f191 100644
--- a/integration/image/remove_test.go
+++ b/integration/image/remove_test.go
@@ -20,7 +20,7 @@
 	img := "test-container-orphaning"
 
 	// Create a container from busybox, and commit a small change so we have a new image
-	cID1 := container.Create(t, ctx, client, container.WithCmd(""))
+	cID1 := container.Create(ctx, t, client, container.WithCmd(""))
 	commitResp1, err := client.ContainerCommit(ctx, cID1, types.ContainerCommitOptions{
 		Changes:   []string{`ENTRYPOINT ["true"]`},
 		Reference: img,
@@ -33,7 +33,7 @@
 	assert.Check(t, is.Equal(resp.ID, commitResp1.ID))
 
 	// Create a container from the created image, and commit a small change with the same reference name
-	cID2 := container.Create(t, ctx, client, container.WithImage(img), container.WithCmd(""))
+	cID2 := container.Create(ctx, t, client, container.WithImage(img), container.WithCmd(""))
 	commitResp2, err := client.ContainerCommit(ctx, cID2, types.ContainerCommitOptions{
 		Changes:   []string{`LABEL Maintainer="Integration Tests"`},
 		Reference: img,
diff --git a/integration/internal/container/container.go b/integration/internal/container/container.go
index 85e6a24..7ea06c6 100644
--- a/integration/internal/container/container.go
+++ b/integration/internal/container/container.go
@@ -22,8 +22,7 @@
 }
 
 // Create creates a container with the specified options
-// nolint: golint
-func Create(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint
+func Create(ctx context.Context, t *testing.T, client client.APIClient, ops ...func(*TestContainerConfig)) string {
 	t.Helper()
 	cmd := []string{"top"}
 	if runtime.GOOS == "windows" {
@@ -49,10 +48,9 @@
 }
 
 // Run creates and starts a container with the specified options
-// nolint: golint
-func Run(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint
+func Run(ctx context.Context, t *testing.T, client client.APIClient, ops ...func(*TestContainerConfig)) string {
 	t.Helper()
-	id := Create(t, ctx, client, ops...)
+	id := Create(ctx, t, client, ops...)
 
 	err := client.ContainerStart(ctx, id, types.ContainerStartOptions{})
 	assert.NilError(t, err)
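
With `ctx` first, helper call sites read as they do throughout this diff; an
illustrative usage sketch (mirroring the tests above; `testEnv.APIClient()`
is used as in those tests):

    ctx := context.Background()
    apiClient := testEnv.APIClient()

    // Create and start a container; each option mutates the
    // TestContainerConfig before the container is created.
    cID := container.Run(ctx, t, apiClient,
        container.WithCmd("top"),
    )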
diff --git a/integration/internal/network/network.go b/integration/internal/network/network.go
index 07d3664..c8194d7 100644
--- a/integration/internal/network/network.go
+++ b/integration/internal/network/network.go
@@ -26,8 +26,7 @@
 }
 
 // CreateNoError creates a network with the specified options and verifies there were no errors
-// nolint: golint
-func CreateNoError(t *testing.T, ctx context.Context, client client.APIClient, name string, ops ...func(*types.NetworkCreate)) string { // nolint: golint
+func CreateNoError(ctx context.Context, t *testing.T, client client.APIClient, name string, ops ...func(*types.NetworkCreate)) string {
 	t.Helper()
 
 	name, err := createNetwork(ctx, client, name, ops...)
diff --git a/integration/network/delete_test.go b/integration/network/delete_test.go
index 5989eba..1d2b0c5 100644
--- a/integration/network/delete_test.go
+++ b/integration/network/delete_test.go
@@ -27,10 +27,10 @@
 // first network's ID as name.
 //
 // After successful creation, properties of all three networks are returned
-func createAmbiguousNetworks(t *testing.T, ctx context.Context, client dclient.APIClient) (string, string, string) { // nolint: golint
-	testNet := network.CreateNoError(t, ctx, client, "testNet")
-	idPrefixNet := network.CreateNoError(t, ctx, client, testNet[:12])
-	fullIDNet := network.CreateNoError(t, ctx, client, testNet)
+func createAmbiguousNetworks(ctx context.Context, t *testing.T, client dclient.APIClient) (string, string, string) {
+	testNet := network.CreateNoError(ctx, t, client, "testNet")
+	idPrefixNet := network.CreateNoError(ctx, t, client, testNet[:12])
+	fullIDNet := network.CreateNoError(ctx, t, client, testNet)
 
 	nws, err := client.NetworkList(ctx, types.NetworkListOptions{})
 	assert.NilError(t, err)
@@ -49,7 +49,7 @@
 	ctx := context.Background()
 
 	netName := "testnetwork_" + t.Name()
-	network.CreateNoError(t, ctx, client, netName,
+	network.CreateNoError(ctx, t, client, netName,
 		network.WithCheckDuplicate(),
 	)
 	assert.Check(t, IsNetworkAvailable(client, netName))
@@ -70,7 +70,7 @@
 	defer setupTest(t)()
 	client := testEnv.APIClient()
 	ctx := context.Background()
-	testNet, idPrefixNet, fullIDNet := createAmbiguousNetworks(t, ctx, client)
+	testNet, idPrefixNet, fullIDNet := createAmbiguousNetworks(ctx, t, client)
 
 	// Delete the network using a prefix of the first network's ID as name.
 	// This should delete the network named with the id-prefix, not the original network.
diff --git a/integration/network/inspect_test.go b/integration/network/inspect_test.go
index 02d2b75..dabf969 100644
--- a/integration/network/inspect_test.go
+++ b/integration/network/inspect_test.go
@@ -21,7 +21,7 @@
 	defer c.Close()
 
 	networkName := "Overlay" + t.Name()
-	overlayID := network.CreateNoError(t, context.Background(), c, networkName,
+	overlayID := network.CreateNoError(context.Background(), t, c, networkName,
 		network.WithDriver("overlay"),
 		network.WithCheckDuplicate(),
 	)
diff --git a/integration/network/ipvlan/ipvlan_test.go b/integration/network/ipvlan/ipvlan_test.go
index e0b2b4e..a02b0db 100644
--- a/integration/network/ipvlan/ipvlan_test.go
+++ b/integration/network/ipvlan/ipvlan_test.go
@@ -39,7 +39,7 @@
 
 	// create a network specifying the desired sub-interface name
 	netName := "di-persist"
-	net.CreateNoError(t, context.Background(), c, netName,
+	net.CreateNoError(context.Background(), t, c, netName,
 		net.WithIPvlan("di-dummy0.70", ""),
 	)
 
@@ -105,7 +105,7 @@
 		defer n.DeleteInterface(t, master)
 
 		netName := "di-subinterface"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithIPvlan("di-dummy0.60", ""),
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
@@ -130,7 +130,7 @@
 		n.CreateVlanInterface(t, master, parent, "30")
 
 		netName := "di-subinterface"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithIPvlan(parent, ""),
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
@@ -147,14 +147,14 @@
 	return func(t *testing.T) {
 		// ipvlan l2 mode - dummy parent interface is provisioned dynamically
 		netName := "di-nil-parent"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithIPvlan("", ""),
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
 
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
-		id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
+		id1 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
+		id2 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
 
 		_, err := container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1})
 		assert.NilError(t, err)
@@ -164,15 +164,15 @@
 func testIpvlanL2InternalMode(client dclient.APIClient) func(*testing.T) {
 	return func(t *testing.T) {
 		netName := "di-internal"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithIPvlan("", ""),
 			net.WithInternal(),
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
 
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
-		id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
+		id1 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
+		id2 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
 
 		timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 		defer cancel()
@@ -189,7 +189,7 @@
 func testIpvlanL3NilParent(client dclient.APIClient) func(*testing.T) {
 	return func(t *testing.T) {
 		netName := "di-nil-parent-l3"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithIPvlan("", "l3"),
 			net.WithIPAM("172.28.230.0/24", ""),
 			net.WithIPAM("172.28.220.0/24", ""),
@@ -197,11 +197,11 @@
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
 
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client,
+		id1 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.220.10"),
 		)
-		id2 := container.Run(t, ctx, client,
+		id2 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.230.10"),
 		)
@@ -214,7 +214,7 @@
 func testIpvlanL3InternalMode(client dclient.APIClient) func(*testing.T) {
 	return func(t *testing.T) {
 		netName := "di-internal-l3"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithIPvlan("", "l3"),
 			net.WithInternal(),
 			net.WithIPAM("172.28.230.0/24", ""),
@@ -223,11 +223,11 @@
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
 
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client,
+		id1 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.220.10"),
 		)
-		id2 := container.Run(t, ctx, client,
+		id2 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.230.10"),
 		)
@@ -247,7 +247,7 @@
 func testIpvlanL2MultiSubnet(client dclient.APIClient) func(*testing.T) {
 	return func(t *testing.T) {
 		netName := "dualstackl2"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithIPvlan("", ""),
 			net.WithIPv6(),
 			net.WithIPAM("172.28.200.0/24", ""),
@@ -259,12 +259,12 @@
 
 		// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client,
+		id1 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.200.20"),
 			container.WithIPv6(netName, "2001:db8:abc8::20"),
 		)
-		id2 := container.Run(t, ctx, client,
+		id2 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.200.21"),
 			container.WithIPv6(netName, "2001:db8:abc8::21"),
@@ -280,12 +280,12 @@
 		assert.NilError(t, err)
 
 		// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64
-		id3 := container.Run(t, ctx, client,
+		id3 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.202.20"),
 			container.WithIPv6(netName, "2001:db8:abc6::20"),
 		)
-		id4 := container.Run(t, ctx, client,
+		id4 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.202.21"),
 			container.WithIPv6(netName, "2001:db8:abc6::21"),
@@ -314,7 +314,7 @@
 func testIpvlanL3MultiSubnet(client dclient.APIClient) func(*testing.T) {
 	return func(t *testing.T) {
 		netName := "dualstackl3"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithIPvlan("", "l3"),
 			net.WithIPv6(),
 			net.WithIPAM("172.28.10.0/24", ""),
@@ -326,12 +326,12 @@
 
 		// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client,
+		id1 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.10.20"),
 			container.WithIPv6(netName, "2001:db8:abc9::20"),
 		)
-		id2 := container.Run(t, ctx, client,
+		id2 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.10.21"),
 			container.WithIPv6(netName, "2001:db8:abc9::21"),
@@ -347,12 +347,12 @@
 		assert.NilError(t, err)
 
 		// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64
-		id3 := container.Run(t, ctx, client,
+		id3 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.12.20"),
 			container.WithIPv6(netName, "2001:db8:abc7::20"),
 		)
-		id4 := container.Run(t, ctx, client,
+		id4 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netName),
 			container.WithIPv4(netName, "172.28.12.21"),
 			container.WithIPv6(netName, "2001:db8:abc7::21"),
@@ -383,7 +383,7 @@
 		// Verify ipvlan l2 mode sets the proper default gateway routes via netlink,
 		// whether explicitly set by the user or inferred via default IPAM
 		netNameL2 := "dualstackl2"
-		net.CreateNoError(t, context.Background(), client, netNameL2,
+		net.CreateNoError(context.Background(), t, client, netNameL2,
 			net.WithIPvlan("", "l2"),
 			net.WithIPv6(),
 			net.WithIPAM("172.28.140.0/24", "172.28.140.254"),
@@ -392,7 +392,7 @@
 		assert.Check(t, n.IsNetworkAvailable(client, netNameL2))
 
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client,
+		id1 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netNameL2),
 		)
 		// Validate that the ipvlan l2 mode default gateway is set to the default IPAM next-hop inferred from the subnet
@@ -406,7 +406,7 @@
 
 		// Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops
 		netNameL3 := "dualstackl3"
-		net.CreateNoError(t, context.Background(), client, netNameL3,
+		net.CreateNoError(context.Background(), t, client, netNameL3,
 			net.WithIPvlan("", "l3"),
 			net.WithIPv6(),
 			net.WithIPAM("172.28.160.0/24", "172.28.160.254"),
@@ -414,7 +414,7 @@
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netNameL3))
 
-		id2 := container.Run(t, ctx, client,
+		id2 := container.Run(ctx, t, client,
 			container.WithNetworkMode(netNameL3),
 		)
 		// Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops
diff --git a/integration/network/macvlan/macvlan_test.go b/integration/network/macvlan/macvlan_test.go
index 164dc4d..cb1f70a 100644
--- a/integration/network/macvlan/macvlan_test.go
+++ b/integration/network/macvlan/macvlan_test.go
@@ -33,7 +33,7 @@
 	c := d.NewClientT(t)
 
 	netName := "dm-persist"
-	net.CreateNoError(t, context.Background(), c, netName,
+	net.CreateNoError(context.Background(), t, c, netName,
 		net.WithMacvlan("dm-dummy0.60"),
 	)
 	assert.Check(t, n.IsNetworkAvailable(c, netName))
@@ -86,7 +86,7 @@
 
 		netName := "dm-subinterface"
 		parentName := "dm-dummy0.40"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithMacvlan(parentName),
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
@@ -116,7 +116,7 @@
 		n.CreateVlanInterface(t, master, parentName, "20")
 
 		netName := "dm-subinterface"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithMacvlan(parentName),
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
@@ -135,14 +135,14 @@
 	return func(t *testing.T) {
 		// macvlan bridge mode - dummy parent interface is provisioned dynamically
 		netName := "dm-nil-parent"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithMacvlan(""),
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
 
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
-		id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
+		id1 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
+		id2 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
 
 		_, err := container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1})
 		assert.Check(t, err == nil)
@@ -153,15 +153,15 @@
 	return func(t *testing.T) {
 		// macvlan bridge mode - dummy parent interface is provisioned dynamically
 		netName := "dm-internal"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithMacvlan(""),
 			net.WithInternal(),
 		)
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
 
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
-		id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName))
+		id1 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
+		id2 := container.Run(ctx, t, client, container.WithNetworkMode(netName))
 
 		timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 		defer cancel()
@@ -178,7 +178,7 @@
 func testMacvlanMultiSubnet(client client.APIClient) func(*testing.T) {
 	return func(t *testing.T) {
 		netName := "dualstackbridge"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithMacvlan(""),
 			net.WithIPv6(),
 			net.WithIPAM("172.28.100.0/24", ""),
@@ -191,12 +191,12 @@
 
 		// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client,
+		id1 := container.Run(ctx, t, client,
 			container.WithNetworkMode("dualstackbridge"),
 			container.WithIPv4("dualstackbridge", "172.28.100.20"),
 			container.WithIPv6("dualstackbridge", "2001:db8:abc2::20"),
 		)
-		id2 := container.Run(t, ctx, client,
+		id2 := container.Run(ctx, t, client,
 			container.WithNetworkMode("dualstackbridge"),
 			container.WithIPv4("dualstackbridge", "172.28.100.21"),
 			container.WithIPv6("dualstackbridge", "2001:db8:abc2::21"),
@@ -212,12 +212,12 @@
 		assert.NilError(t, err)
 
 		// start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64
-		id3 := container.Run(t, ctx, client,
+		id3 := container.Run(ctx, t, client,
 			container.WithNetworkMode("dualstackbridge"),
 			container.WithIPv4("dualstackbridge", "172.28.102.20"),
 			container.WithIPv6("dualstackbridge", "2001:db8:abc4::20"),
 		)
-		id4 := container.Run(t, ctx, client,
+		id4 := container.Run(ctx, t, client,
 			container.WithNetworkMode("dualstackbridge"),
 			container.WithIPv4("dualstackbridge", "172.28.102.21"),
 			container.WithIPv6("dualstackbridge", "2001:db8:abc4::21"),
@@ -247,7 +247,7 @@
 	return func(t *testing.T) {
 		// Ensure the default gateways, next-hops and default dev devices are properly set
 		netName := "dualstackbridge"
-		net.CreateNoError(t, context.Background(), client, netName,
+		net.CreateNoError(context.Background(), t, client, netName,
 			net.WithMacvlan(""),
 			net.WithIPv6(),
 			net.WithOption("macvlan_mode", "bridge"),
@@ -257,7 +257,7 @@
 		assert.Check(t, n.IsNetworkAvailable(client, netName))
 
 		ctx := context.Background()
-		id1 := container.Run(t, ctx, client,
+		id1 := container.Run(ctx, t, client,
 			container.WithNetworkMode("dualstackbridge"),
 		)
 
diff --git a/integration/network/network_test.go b/integration/network/network_test.go
index e3c2d08..b8c9cc4 100644
--- a/integration/network/network_test.go
+++ b/integration/network/network_test.go
@@ -29,14 +29,14 @@
 	c := d.NewClientT(t)
 	ctx := context.Background()
 
-	id1 := container.Run(t, ctx, c)
+	id1 := container.Run(ctx, t, c)
 	defer c.ContainerRemove(ctx, id1, types.ContainerRemoveOptions{Force: true})
 
 	result, err := container.Exec(ctx, c, id1, []string{"ip", "l"})
 	assert.NilError(t, err)
 	assert.Check(t, is.Equal(false, strings.Contains(result.Combined(), "eth0")), "There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled")
 
-	id2 := container.Run(t, ctx, c, container.WithNetworkMode("bridge"))
+	id2 := container.Run(ctx, t, c, container.WithNetworkMode("bridge"))
 	defer c.ContainerRemove(ctx, id2, types.ContainerRemoveOptions{Force: true})
 
 	result, err = container.Exec(ctx, c, id2, []string{"ip", "l"})
@@ -50,7 +50,7 @@
 	err = cmd.Run()
 	assert.NilError(t, err, "Failed to get current process network namespace: %+v", err)
 
-	id3 := container.Run(t, ctx, c, container.WithNetworkMode("host"))
+	id3 := container.Run(ctx, t, c, container.WithNetworkMode("host"))
 	defer c.ContainerRemove(ctx, id3, types.ContainerRemoveOptions{Force: true})
 
 	result, err = container.Exec(ctx, c, id3, []string{"sh", "-c", nsCommand})
diff --git a/integration/network/service_test.go b/integration/network/service_test.go
index 36360a2..d2b2bb0 100644
--- a/integration/network/service_test.go
+++ b/integration/network/service_test.go
@@ -71,7 +71,7 @@
 
 	// Create a bridge network and verify its subnet is the second default pool
 	name := "elango" + t.Name()
-	network.CreateNoError(t, context.Background(), c, name,
+	network.CreateNoError(context.Background(), t, c, name,
 		network.WithDriver("bridge"),
 	)
 	out, err = c.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{})
@@ -80,7 +80,7 @@
 
 	// Create a bridge network and verify its subnet is the third default pool
 	name = "saanvi" + t.Name()
-	network.CreateNoError(t, context.Background(), c, name,
+	network.CreateNoError(context.Background(), t, c, name,
 		network.WithDriver("bridge"),
 	)
 	out, err = c.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{})
@@ -103,7 +103,7 @@
 
 	// Create a bridge network
 	name := "elango" + t.Name()
-	network.CreateNoError(t, context.Background(), c, name,
+	network.CreateNoError(context.Background(), t, c, name,
 		network.WithDriver("bridge"),
 	)
 
@@ -136,7 +136,7 @@
 
 	// Create a bridge network
 	name := "elango" + t.Name()
-	network.CreateNoError(t, context.Background(), c, name,
+	network.CreateNoError(context.Background(), t, c, name,
 		network.WithDriver("bridge"),
 	)
 
@@ -147,7 +147,7 @@
 
 	// Create a bridge network
 	name = "sthira" + t.Name()
-	network.CreateNoError(t, context.Background(), c, name,
+	network.CreateNoError(context.Background(), t, c, name,
 		network.WithDriver("bridge"),
 	)
 	out, err = c.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{})
@@ -162,7 +162,7 @@
 
 	// Create a bridge network
 	name = "saanvi" + t.Name()
-	network.CreateNoError(t, context.Background(), c, name,
+	network.CreateNoError(context.Background(), t, c, name,
 		network.WithDriver("bridge"),
 	)
 	out1, err := c.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{})
@@ -343,7 +343,7 @@
 
 	// Create an overlay network
 	name := "saanvisthira" + t.Name()
-	network.CreateNoError(t, context.Background(), c, name,
+	network.CreateNoError(context.Background(), t, c, name,
 		network.WithDriver("overlay"))
 
 	var instances uint64 = 1
@@ -369,7 +369,7 @@
 
 	// Create an overlay network
 	name = "saanvisthira" + t.Name()
-	network.CreateNoError(t, context.Background(), c, name,
+	network.CreateNoError(context.Background(), t, c, name,
 		network.WithDriver("overlay"))
 
 	serviceID = swarm.CreateService(t, d,
@@ -402,7 +402,7 @@
 
 	// Create an overlay network
 	name := "saanvisthira" + t.Name()
-	network.CreateNoError(t, context.Background(), cli, name,
+	network.CreateNoError(context.Background(), t, cli, name,
 		network.WithDriver("overlay"))
 
 	var instances uint64 = 1
diff --git a/integration/plugin/authz/authz_plugin_test.go b/integration/plugin/authz/authz_plugin_test.go
index 0cbf045..108cb83 100644
--- a/integration/plugin/authz/authz_plugin_test.go
+++ b/integration/plugin/authz/authz_plugin_test.go
@@ -92,7 +92,7 @@
 	ctx := context.Background()
 
 	// Ensure command successful
-	cID := container.Run(t, ctx, c)
+	cID := container.Run(ctx, t, c)
 
 	assertURIRecorded(t, ctrl.requestsURIs, "/containers/create")
 	assertURIRecorded(t, ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", cID))
@@ -224,7 +224,7 @@
 	defer cancel()
 
 	// Create a container and wait for the creation events
-	cID := container.Run(t, ctx, c)
+	cID := container.Run(ctx, t, c)
 	poll.WaitOn(t, container.IsInState(ctx, c, cID, "running"))
 
 	created := false
@@ -348,7 +348,7 @@
 
 	exportedImagePath := filepath.Join(tmp, "export.tar")
 
-	cID := container.Run(t, ctx, c)
+	cID := container.Run(ctx, t, c)
 
 	responseReader, err := c.ContainerExport(context.Background(), cID)
 	assert.NilError(t, err)
@@ -388,7 +388,7 @@
 	c := d.NewClientT(t)
 	ctx := context.Background()
 
-	cID := container.Run(t, ctx, c)
+	cID := container.Run(ctx, t, c)
 	defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
 
 	_, err = f.Seek(0, io.SeekStart)
diff --git a/integration/plugin/authz/authz_plugin_v2_test.go b/integration/plugin/authz/authz_plugin_v2_test.go
index 6b44108..937bcd3 100644
--- a/integration/plugin/authz/authz_plugin_v2_test.go
+++ b/integration/plugin/authz/authz_plugin_v2_test.go
@@ -55,7 +55,7 @@
 	d.LoadBusybox(t)
 
 	// Ensure docker run command and accompanying docker ps are successful
-	cID := container.Run(t, ctx, c)
+	cID := container.Run(ctx, t, c)
 
 	_, err = c.ContainerInspect(ctx, cID)
 	assert.NilError(t, err)
diff --git a/integration/plugin/graphdriver/external_test.go b/integration/plugin/graphdriver/external_test.go
index 99ce60c..c89f996 100644
--- a/integration/plugin/graphdriver/external_test.go
+++ b/integration/plugin/graphdriver/external_test.go
@@ -360,7 +360,7 @@
 
 			ctx := context.Background()
 
-			testGraphDriver(t, c, ctx, driverName, func(t *testing.T) {
+			testGraphDriver(ctx, t, c, driverName, func(t *testing.T) {
 				d.Restart(t, "-s", driverName)
 			})
 
@@ -399,7 +399,7 @@
 		_, err = io.Copy(ioutil.Discard, r)
 		assert.NilError(t, err)
 
-		container.Run(t, ctx, c, container.WithImage("busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0"))
+		container.Run(ctx, t, c, container.WithImage("busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0"))
 	}
 }
 
@@ -434,12 +434,11 @@
 	d.Stop(t)
 	d.StartWithBusybox(t, "-s", plugin, "--storage-opt", "overlay2.override_kernel_check=1")
 
-	testGraphDriver(t, client, ctx, plugin, nil)
+	testGraphDriver(ctx, t, client, plugin, nil)
 }
 
-// nolint: golint
-func testGraphDriver(t *testing.T, c client.APIClient, ctx context.Context, driverName string, afterContainerRunFn func(*testing.T)) { //nolint: golint
-	id := container.Run(t, ctx, c, container.WithCmd("sh", "-c", "echo hello > /hello"))
+func testGraphDriver(ctx context.Context, t *testing.T, c client.APIClient, driverName string, afterContainerRunFn func(*testing.T)) {
+	id := container.Run(ctx, t, c, container.WithCmd("sh", "-c", "echo hello > /hello"))
 
 	if afterContainerRunFn != nil {
 		afterContainerRunFn(t)
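
For reference, the sweep through these test files moves context.Context to the front of each helper's parameter list, following the Go convention that a ctx argument comes first. A minimal sketch of the pattern with a hypothetical helper (the real helpers live under integration/internal/ and take functional options):

```go
package example

import (
	"context"
	"testing"
)

// runContainer is a hypothetical stand-in for the integration test helpers:
// context.Context is the first parameter, the *testing.T comes second, and
// everything else follows. Before this change the order was (t, ctx, ...).
func runContainer(ctx context.Context, t *testing.T, image string) string {
	t.Helper()
	// ... create and start a container via the API client, failing t on error ...
	return "container-id" // placeholder return for the sketch
}
```
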
diff --git a/integration/plugin/logging/logging_linux_test.go b/integration/plugin/logging/logging_linux_test.go
index edddb1a..1bb2d941 100644
--- a/integration/plugin/logging/logging_linux_test.go
+++ b/integration/plugin/logging/logging_linux_test.go
@@ -33,7 +33,7 @@
 
 	ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second)
 
-	id := container.Run(t, ctx, client,
+	id := container.Run(ctx, t, client,
 		container.WithAutoRemove,
 		container.WithLogDriver("test"),
 		container.WithCmd(
diff --git a/integration/service/create_test.go b/integration/service/create_test.go
index f4948f8..3e2baaf 100644
--- a/integration/service/create_test.go
+++ b/integration/service/create_test.go
@@ -84,7 +84,7 @@
 	ctx := context.Background()
 
 	overlayName := "overlay1_" + t.Name()
-	overlayID := network.CreateNoError(t, ctx, client, overlayName,
+	overlayID := network.CreateNoError(ctx, t, client, overlayName,
 		network.WithCheckDuplicate(),
 		network.WithDriver("overlay"),
 	)
@@ -175,11 +175,11 @@
 	ctx := context.Background()
 
 	name := "foo_" + t.Name()
-	n1 := network.CreateNoError(t, ctx, client, name, network.WithDriver("bridge"))
-	n2 := network.CreateNoError(t, ctx, client, name, network.WithDriver("bridge"))
+	n1 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge"))
+	n2 := network.CreateNoError(ctx, t, client, name, network.WithDriver("bridge"))
 
 	// Duplicate with the same name but a different driver
-	n3 := network.CreateNoError(t, ctx, client, name, network.WithDriver("overlay"))
+	n3 := network.CreateNoError(ctx, t, client, name, network.WithDriver("overlay"))
 
 	// Create Service with the same name
 	var instances uint64 = 1
diff --git a/integration/service/network_test.go b/integration/service/network_test.go
index 8bde429..3961963 100644
--- a/integration/service/network_test.go
+++ b/integration/service/network_test.go
@@ -24,12 +24,12 @@
 	ctx := context.Background()
 
 	name := t.Name() + "test-alias"
-	net.CreateNoError(t, ctx, client, name,
+	net.CreateNoError(ctx, t, client, name,
 		net.WithDriver("overlay"),
 		net.WithAttachable(),
 	)
 
-	cID1 := container.Create(t, ctx, client, func(c *container.TestContainerConfig) {
+	cID1 := container.Create(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.NetworkingConfig = &network.NetworkingConfig{
 			EndpointsConfig: map[string]*network.EndpointSettings{
 				name: {},
@@ -52,7 +52,7 @@
 	assert.Check(t, is.Equal(len(ng1.NetworkSettings.Networks[name].Aliases), 2))
 	assert.Check(t, is.Equal(ng1.NetworkSettings.Networks[name].Aliases[0], "aaa"))
 
-	cID2 := container.Create(t, ctx, client, func(c *container.TestContainerConfig) {
+	cID2 := container.Create(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.NetworkingConfig = &network.NetworkingConfig{
 			EndpointsConfig: map[string]*network.EndpointSettings{
 				name: {},
@@ -86,12 +86,12 @@
 	ctx := context.Background()
 
 	name := t.Name() + "dummyNet"
-	net.CreateNoError(t, ctx, client, name,
+	net.CreateNoError(ctx, t, client, name,
 		net.WithDriver("overlay"),
 		net.WithAttachable(),
 	)
 
-	c1 := container.Create(t, ctx, client, func(c *container.TestContainerConfig) {
+	c1 := container.Create(ctx, t, client, func(c *container.TestContainerConfig) {
 		c.NetworkingConfig = &network.NetworkingConfig{
 			EndpointsConfig: map[string]*network.EndpointSettings{
 				name: {},
diff --git a/integration/service/update_test.go b/integration/service/update_test.go
index 8575e56..93e2ab4 100644
--- a/integration/service/update_test.go
+++ b/integration/service/update_test.go
@@ -207,7 +207,7 @@
 
 	// Create an overlay network
 	testNet := "testNet" + t.Name()
-	overlayID := network.CreateNoError(t, ctx, cli, testNet,
+	overlayID := network.CreateNoError(ctx, t, cli, testNet,
 		network.WithDriver("overlay"))
 
 	var instances uint64 = 1
diff --git a/integration/system/cgroupdriver_systemd_test.go b/integration/system/cgroupdriver_systemd_test.go
index bc076aa..b277cca 100644
--- a/integration/system/cgroupdriver_systemd_test.go
+++ b/integration/system/cgroupdriver_systemd_test.go
@@ -41,7 +41,7 @@
 	const mem = 64 * 1024 * 1024 // 64 MB
 
 	ctx := context.Background()
-	ctrID := container.Create(t, ctx, c, func(ctr *container.TestContainerConfig) {
+	ctrID := container.Create(ctx, t, c, func(ctr *container.TestContainerConfig) {
 		ctr.HostConfig.Resources.Memory = mem
 	})
 	defer c.ContainerRemove(ctx, ctrID, types.ContainerRemoveOptions{Force: true})
diff --git a/integration/system/event_test.go b/integration/system/event_test.go
index c7548a6..61adeed 100644
--- a/integration/system/event_test.go
+++ b/integration/system/event_test.go
@@ -30,7 +30,7 @@
 	ctx := context.Background()
 	client := testEnv.APIClient()
 
-	cID := container.Run(t, ctx, client)
+	cID := container.Run(ctx, t, client)
 
 	id, err := client.ContainerExecCreate(ctx, cID,
 		types.ExecConfig{
@@ -83,7 +83,7 @@
 	since := request.DaemonTime(ctx, t, client, testEnv)
 	ts := strconv.FormatInt(since.Unix(), 10)
 
-	cID := container.Create(t, ctx, client)
+	cID := container.Create(ctx, t, client)
 
 	// If there are no events, the API should respond immediately (without blocking);
 	// the test here makes sure the response time is less than 3 sec.
diff --git a/integration/volume/volume_test.go b/integration/volume/volume_test.go
index a16a1da..0336b94 100644
--- a/integration/volume/volume_test.go
+++ b/integration/volume/volume_test.go
@@ -68,7 +68,7 @@
 
 	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
 
-	id := container.Create(t, ctx, client, container.WithVolume(prefix+slash+"foo"))
+	id := container.Create(ctx, t, client, container.WithVolume(prefix+slash+"foo"))
 
 	c, err := client.ContainerInspect(ctx, id)
 	assert.NilError(t, err)
diff --git a/vendor.conf b/vendor.conf
index 641e968..f205def 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -74,7 +74,7 @@
 # get go-zfs packages
 github.com/mistifyio/go-zfs                         f784269be439d704d3dfa1906f45dd848fed2beb
 
-google.golang.org/grpc                              25c4f928eaa6d96443009bd842389fb4fa48664e # v1.20.1
+google.golang.org/grpc                              6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0
 
 # The version of runc should match the version that is used by the containerd
 # version that is used. If you need to update runc, open a pull request in
@@ -127,11 +127,11 @@
 github.com/containerd/console                       0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
 github.com/containerd/go-runc                       7d11b49dc0769f6dbb0d1b19f3d48524d1bad9ad
 github.com/containerd/typeurl                       2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d
-github.com/containerd/ttrpc                         699c4e40d1e7416e08bf7019c7ce2e9beced4636
+github.com/containerd/ttrpc                         92c8520ef9f86600c650dd540266a007bf03670f
 github.com/gogo/googleapis                          d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0
 
 # cluster
-github.com/docker/swarmkit                          f35d9100f2c6ac810cc8d7de6e8f93dcc7a42d29 # bump_v19.03 branch
+github.com/docker/swarmkit                          bbe341867eae1615faf8a702ec05bfe986e73e06 # bump_v19.03 branch
 github.com/gogo/protobuf                            ba06b47c162d49f2af050fb4c75bcbc86a159d5c # v1.2.1
 github.com/cloudflare/cfssl                         5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2
 github.com/fernet/fernet-go                         1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
diff --git a/vendor/github.com/containerd/ttrpc/channel.go b/vendor/github.com/containerd/ttrpc/channel.go
index 22f5496..aa8c954 100644
--- a/vendor/github.com/containerd/ttrpc/channel.go
+++ b/vendor/github.com/containerd/ttrpc/channel.go
@@ -18,7 +18,6 @@
 
 import (
 	"bufio"
-	"context"
 	"encoding/binary"
 	"io"
 	"net"
@@ -98,7 +97,7 @@
 // returned will be valid and the caller should send it along to
 // the correct consumer. The bytes on the underlying channel
 // will be discarded.
-func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) {
+func (ch *channel) recv() (messageHeader, []byte, error) {
 	mh, err := readMessageHeader(ch.hrbuf[:], ch.br)
 	if err != nil {
 		return messageHeader{}, nil, err
@@ -120,7 +119,7 @@
 	return mh, p, nil
 }
 
-func (ch *channel) send(ctx context.Context, streamID uint32, t messageType, p []byte) error {
+func (ch *channel) send(streamID uint32, t messageType, p []byte) error {
 	if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go
index 35ca91f..bdd1d12 100644
--- a/vendor/github.com/containerd/ttrpc/client.go
+++ b/vendor/github.com/containerd/ttrpc/client.go
@@ -29,6 +29,7 @@
 	"github.com/gogo/protobuf/proto"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
 
@@ -36,36 +37,52 @@
 // closed.
 var ErrClosed = errors.New("ttrpc: closed")
 
+// Client for a ttrpc server
 type Client struct {
 	codec   codec
 	conn    net.Conn
 	channel *channel
 	calls   chan *callRequest
 
-	closed    chan struct{}
-	closeOnce sync.Once
-	closeFunc func()
-	done      chan struct{}
-	err       error
+	ctx    context.Context
+	closed func()
+
+	closeOnce     sync.Once
+	userCloseFunc func()
+
+	errOnce     sync.Once
+	err         error
+	interceptor UnaryClientInterceptor
 }
 
+// ClientOpts configures a client
 type ClientOpts func(c *Client)
 
+// WithOnClose sets the close func whenever the client's Close() method is called
 func WithOnClose(onClose func()) ClientOpts {
 	return func(c *Client) {
-		c.closeFunc = onClose
+		c.userCloseFunc = onClose
+	}
+}
+
+// WithUnaryClientInterceptor sets the provided client interceptor
+func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts {
+	return func(c *Client) {
+		c.interceptor = i
 	}
 }
 
 func NewClient(conn net.Conn, opts ...ClientOpts) *Client {
+	ctx, cancel := context.WithCancel(context.Background())
 	c := &Client{
-		codec:     codec{},
-		conn:      conn,
-		channel:   newChannel(conn),
-		calls:     make(chan *callRequest),
-		closed:    make(chan struct{}),
-		done:      make(chan struct{}),
-		closeFunc: func() {},
+		codec:         codec{},
+		conn:          conn,
+		channel:       newChannel(conn),
+		calls:         make(chan *callRequest),
+		closed:        cancel,
+		ctx:           ctx,
+		userCloseFunc: func() {},
+		interceptor:   defaultClientInterceptor,
 	}
 
 	for _, o := range opts {
@@ -99,11 +116,18 @@
 		cresp = &Response{}
 	)
 
+	if metadata, ok := GetMetadata(ctx); ok {
+		metadata.setRequest(creq)
+	}
+
 	if dl, ok := ctx.Deadline(); ok {
 		creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds()
 	}
 
-	if err := c.dispatch(ctx, creq, cresp); err != nil {
+	info := &UnaryClientInfo{
+		FullMethod: fullPath(service, method),
+	}
+	if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil {
 		return err
 	}
 
@@ -111,11 +135,10 @@
 		return err
 	}
 
-	if cresp.Status == nil {
-		return errors.New("no status provided on response")
+	if cresp.Status != nil && cresp.Status.Code != int32(codes.OK) {
+		return status.ErrorProto(cresp.Status)
 	}
-
-	return status.ErrorProto(cresp.Status)
+	return nil
 }
 
 func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error {
@@ -131,8 +154,8 @@
 	case <-ctx.Done():
 		return ctx.Err()
 	case c.calls <- call:
-	case <-c.done:
-		return c.err
+	case <-c.ctx.Done():
+		return c.error()
 	}
 
 	select {
@@ -140,16 +163,15 @@
 		return ctx.Err()
 	case err := <-errs:
 		return filterCloseErr(err)
-	case <-c.done:
-		return c.err
+	case <-c.ctx.Done():
+		return c.error()
 	}
 }
 
 func (c *Client) Close() error {
 	c.closeOnce.Do(func() {
-		close(c.closed)
+		c.closed()
 	})
-
 	return nil
 }
 
@@ -159,51 +181,82 @@
 	err error
 }
 
-func (c *Client) run() {
-	var (
-		streamID    uint32 = 1
-		waiters            = make(map[uint32]*callRequest)
-		calls              = c.calls
-		incoming           = make(chan *message)
-		shutdown           = make(chan struct{})
-		shutdownErr error
-	)
+type receiver struct {
+	wg       *sync.WaitGroup
+	messages chan *message
+	err      error
+}
 
-	go func() {
-		defer close(shutdown)
+func (r *receiver) run(ctx context.Context, c *channel) {
+	defer r.wg.Done()
 
-		// start one more goroutine to recv messages without blocking.
-		for {
-			mh, p, err := c.channel.recv(context.TODO())
+	for {
+		select {
+		case <-ctx.Done():
+			r.err = ctx.Err()
+			return
+		default:
+			mh, p, err := c.recv()
 			if err != nil {
 				_, ok := status.FromError(err)
 				if !ok {
 					// treat all errors that are not an rpc status as terminal.
 					// all others poison the connection.
-					shutdownErr = err
+					r.err = filterCloseErr(err)
 					return
 				}
 			}
 			select {
-			case incoming <- &message{
+			case r.messages <- &message{
 				messageHeader: mh,
 				p:             p[:mh.Length],
 				err:           err,
 			}:
-			case <-c.done:
+			case <-ctx.Done():
+				r.err = ctx.Err()
 				return
 			}
 		}
-	}()
+	}
+}
 
-	defer c.conn.Close()
-	defer close(c.done)
-	defer c.closeFunc()
+func (c *Client) run() {
+	var (
+		streamID      uint32 = 1
+		waiters              = make(map[uint32]*callRequest)
+		calls                = c.calls
+		incoming             = make(chan *message)
+		receiversDone        = make(chan struct{})
+		wg            sync.WaitGroup
+	)
+
+	// broadcast the shutdown error to the remaining waiters.
+	abortWaiters := func(wErr error) {
+		for _, waiter := range waiters {
+			waiter.errs <- wErr
+		}
+	}
+	recv := &receiver{
+		wg:       &wg,
+		messages: incoming,
+	}
+	wg.Add(1)
+
+	go func() {
+		wg.Wait()
+		close(receiversDone)
+	}()
+	go recv.run(c.ctx, c.channel)
+
+	defer func() {
+		c.conn.Close()
+		c.userCloseFunc()
+	}()
 
 	for {
 		select {
 		case call := <-calls:
-			if err := c.send(call.ctx, streamID, messageTypeRequest, call.req); err != nil {
+			if err := c.send(streamID, messageTypeRequest, call.req); err != nil {
 				call.errs <- err
 				continue
 			}
@@ -219,41 +272,42 @@
 
 			call.errs <- c.recv(call.resp, msg)
 			delete(waiters, msg.StreamID)
-		case <-shutdown:
-			if shutdownErr != nil {
-				shutdownErr = filterCloseErr(shutdownErr)
-			} else {
-				shutdownErr = ErrClosed
+		case <-receiversDone:
+			// all the receivers have exited
+			if recv.err != nil {
+				c.setError(recv.err)
 			}
-
-			shutdownErr = errors.Wrapf(shutdownErr, "ttrpc: client shutting down")
-
-			c.err = shutdownErr
-			for _, waiter := range waiters {
-				waiter.errs <- shutdownErr
-			}
+			// don't return yet; let the close of the context trigger the abort of waiters
 			c.Close()
-			return
-		case <-c.closed:
-			if c.err == nil {
-				c.err = ErrClosed
-			}
-			// broadcast the shutdown error to the remaining waiters.
-			for _, waiter := range waiters {
-				waiter.errs <- c.err
-			}
+		case <-c.ctx.Done():
+			abortWaiters(c.error())
 			return
 		}
 	}
 }
 
-func (c *Client) send(ctx context.Context, streamID uint32, mtype messageType, msg interface{}) error {
+func (c *Client) error() error {
+	c.errOnce.Do(func() {
+		if c.err == nil {
+			c.err = ErrClosed
+		}
+	})
+	return c.err
+}
+
+func (c *Client) setError(err error) {
+	c.errOnce.Do(func() {
+		c.err = err
+	})
+}
+
+func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error {
 	p, err := c.codec.Marshal(msg)
 	if err != nil {
 		return err
 	}
 
-	return c.channel.send(ctx, streamID, mtype, p)
+	return c.channel.send(streamID, mtype, p)
 }
 
 func (c *Client) recv(resp *Response, msg *message) error {
@@ -274,22 +328,21 @@
 //
 // This purposely ignores errors with a wrapped cause.
 func filterCloseErr(err error) error {
-	if err == nil {
+	switch {
+	case err == nil:
 		return nil
-	}
-
-	if err == io.EOF {
+	case err == io.EOF:
 		return ErrClosed
-	}
-
-	if strings.Contains(err.Error(), "use of closed network connection") {
+	case errors.Cause(err) == io.EOF:
 		return ErrClosed
-	}
-
-	// if we have an epipe on a write, we cast to errclosed
-	if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" {
-		if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE {
-			return ErrClosed
+	case strings.Contains(err.Error(), "use of closed network connection"):
+		return ErrClosed
+	default:
+		// if we have an EPIPE on a write, we treat it as ErrClosed
+		if oerr, ok := err.(*net.OpError); ok && oerr.Op == "write" {
+			if serr, ok := oerr.Err.(*os.SyscallError); ok && serr.Err == syscall.EPIPE {
+				return ErrClosed
+			}
 		}
 	}
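
The reworked client above replaces the closed/done channel pair with a cancelable context plus a sync.Once-guarded error, and moves reads into a dedicated receiver goroutine so a blocked recv no longer prevents shutdown. A minimal sketch of the two client options this vendor bump introduces, using only the APIs shown in the diff (socket path hypothetical):

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/containerd/ttrpc"
)

func main() {
	// Hypothetical socket path; any net.Conn works.
	conn, err := net.Dial("unix", "/run/example/ttrpc.sock")
	if err != nil {
		log.Fatal(err)
	}

	client := ttrpc.NewClient(conn,
		// WithOnClose runs the user callback once when Close() is called.
		ttrpc.WithOnClose(func() { log.Println("ttrpc client closed") }),
		// WithUnaryClientInterceptor wraps every outgoing call.
		ttrpc.WithUnaryClientInterceptor(func(ctx context.Context, req *ttrpc.Request, resp *ttrpc.Response, info *ttrpc.UnaryClientInfo, invoker ttrpc.Invoker) error {
			log.Printf("ttrpc call: %s", info.FullMethod)
			return invoker(ctx, req, resp)
		}),
	)
	defer client.Close()
	// Generated service stubs would wrap client.Call(...) from here.
}
```
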
 
diff --git a/vendor/github.com/containerd/ttrpc/config.go b/vendor/github.com/containerd/ttrpc/config.go
index 019b7a0..6a53c11 100644
--- a/vendor/github.com/containerd/ttrpc/config.go
+++ b/vendor/github.com/containerd/ttrpc/config.go
@@ -19,9 +19,11 @@
 import "github.com/pkg/errors"
 
 type serverConfig struct {
-	handshaker Handshaker
+	handshaker  Handshaker
+	interceptor UnaryServerInterceptor
 }
 
+// ServerOpt for configuring a ttrpc server
 type ServerOpt func(*serverConfig) error
 
 // WithServerHandshaker can be passed to NewServer to ensure that the
@@ -37,3 +39,14 @@
 		return nil
 	}
 }
+
+// WithUnaryServerInterceptor sets the provided interceptor on the server
+func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt {
+	return func(c *serverConfig) error {
+		if c.interceptor != nil {
+			return errors.New("only one interceptor allowed per server")
+		}
+		c.interceptor = i
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/ttrpc/interceptor.go b/vendor/github.com/containerd/ttrpc/interceptor.go
new file mode 100644
index 0000000..c1219da
--- /dev/null
+++ b/vendor/github.com/containerd/ttrpc/interceptor.go
@@ -0,0 +1,50 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package ttrpc
+
+import "context"
+
+// UnaryServerInfo provides information about the server request
+type UnaryServerInfo struct {
+	FullMethod string
+}
+
+// UnaryClientInfo provides information about the client request
+type UnaryClientInfo struct {
+	FullMethod string
+}
+
+// Unmarshaler contains the server request data and allows it to be unmarshaled
+// into a concrete type
+type Unmarshaler func(interface{}) error
+
+// Invoker dispatches the client's request to the ttrpc server and fills in the response
+type Invoker func(context.Context, *Request, *Response) error
+
+// UnaryServerInterceptor specifies the interceptor function for server request/response
+type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error)
+
+// UnaryClientInterceptor specifies the interceptor function for client request/response
+type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error
+
+func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) {
+	return method(ctx, unmarshal)
+}
+
+func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error {
+	return invoker(ctx, req, resp)
+}
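
The matching server-side hook is wired in through the WithUnaryServerInterceptor option added in config.go above; only one interceptor may be registered per server. A minimal sketch, assuming the vendored ttrpc API (including the exported Method type from services.go):

```go
package example

import (
	"context"
	"log"

	"github.com/containerd/ttrpc"
)

// newServer builds a ttrpc server whose interceptor logs every dispatched
// method before delegating to the real handler, mirroring what
// defaultServerInterceptor does when no option is given.
func newServer() (*ttrpc.Server, error) {
	return ttrpc.NewServer(ttrpc.WithUnaryServerInterceptor(
		func(ctx context.Context, unmarshal ttrpc.Unmarshaler, info *ttrpc.UnaryServerInfo, method ttrpc.Method) (interface{}, error) {
			log.Printf("ttrpc serve: %s", info.FullMethod)
			return method(ctx, unmarshal)
		},
	))
}
```
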
diff --git a/vendor/github.com/containerd/ttrpc/metadata.go b/vendor/github.com/containerd/ttrpc/metadata.go
new file mode 100644
index 0000000..ce8c0d1
--- /dev/null
+++ b/vendor/github.com/containerd/ttrpc/metadata.go
@@ -0,0 +1,107 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package ttrpc
+
+import (
+	"context"
+	"strings"
+)
+
+// MD is the user type for ttrpc metadata
+type MD map[string][]string
+
+// Get returns the metadata values for a given key, if they exist.
+// If there is no metadata, a nil slice and false are returned.
+func (m MD) Get(key string) ([]string, bool) {
+	key = strings.ToLower(key)
+	list, ok := m[key]
+	if !ok || len(list) == 0 {
+		return nil, false
+	}
+
+	return list, true
+}
+
+// Set sets the provided values for a given key.
+// The values will overwrite any existing values.
+// If no values are provided, the key will be deleted.
+func (m MD) Set(key string, values ...string) {
+	key = strings.ToLower(key)
+	if len(values) == 0 {
+		delete(m, key)
+		return
+	}
+	m[key] = values
+}
+
+// Append appends additional values to the given key.
+func (m MD) Append(key string, values ...string) {
+	key = strings.ToLower(key)
+	if len(values) == 0 {
+		return
+	}
+	current, ok := m[key]
+	if ok {
+		m.Set(key, append(current, values...)...)
+	} else {
+		m.Set(key, values...)
+	}
+}
+
+func (m MD) setRequest(r *Request) {
+	for k, values := range m {
+		for _, v := range values {
+			r.Metadata = append(r.Metadata, &KeyValue{
+				Key:   k,
+				Value: v,
+			})
+		}
+	}
+}
+
+func (m MD) fromRequest(r *Request) {
+	for _, kv := range r.Metadata {
+		m[kv.Key] = append(m[kv.Key], kv.Value)
+	}
+}
+
+type metadataKey struct{}
+
+// GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata)
+func GetMetadata(ctx context.Context) (MD, bool) {
+	metadata, ok := ctx.Value(metadataKey{}).(MD)
+	return metadata, ok
+}
+
+// GetMetadataValue gets a specific metadata value by name from context.Context
+func GetMetadataValue(ctx context.Context, name string) (string, bool) {
+	metadata, ok := GetMetadata(ctx)
+	if !ok {
+		return "", false
+	}
+
+	if list, ok := metadata.Get(name); ok {
+		return list[0], true
+	}
+
+	return "", false
+}
+
+// WithMetadata attaches metadata map to a context.Context
+func WithMetadata(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, metadataKey{}, md)
+}
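
Metadata attached on the client side travels in the new Request.Metadata field (see types.go below) and is rebuilt into the handler's context by getRequestContext in server.go. A self-contained sketch of the round trip, exercising only the helpers defined above:

```go
package example

import (
	"context"
	"log"

	"github.com/containerd/ttrpc"
)

func metadataRoundTrip() {
	// Client side: attach metadata to the outgoing context; setRequest
	// copies it into the Request.Metadata field on the wire.
	md := ttrpc.MD{}
	md.Set("request-id", "abc123") // keys are lowercased by Set
	ctx := ttrpc.WithMetadata(context.Background(), md)

	// Server side (normally inside a handler, after getRequestContext has
	// rebuilt the MD from the request): read a single value back.
	if v, ok := ttrpc.GetMetadataValue(ctx, "request-id"); ok {
		log.Printf("request-id: %s", v)
	}
}
```
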
diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go
index 40804ea..1d4f1df 100644
--- a/vendor/github.com/containerd/ttrpc/server.go
+++ b/vendor/github.com/containerd/ttrpc/server.go
@@ -53,10 +53,13 @@
 			return nil, err
 		}
 	}
+	if config.interceptor == nil {
+		config.interceptor = defaultServerInterceptor
+	}
 
 	return &Server{
 		config:      config,
-		services:    newServiceSet(),
+		services:    newServiceSet(config.interceptor),
 		done:        make(chan struct{}),
 		listeners:   make(map[net.Listener]struct{}),
 		connections: make(map[*serverConn]struct{}),
@@ -341,7 +344,7 @@
 			default: // proceed
 			}
 
-			mh, p, err := ch.recv(ctx)
+			mh, p, err := ch.recv()
 			if err != nil {
 				status, ok := status.FromError(err)
 				if !ok {
@@ -438,7 +441,7 @@
 				return
 			}
 
-			if err := ch.send(ctx, response.id, messageTypeResponse, p); err != nil {
+			if err := ch.send(response.id, messageTypeResponse, p); err != nil {
 				logrus.WithError(err).Error("failed sending message on channel")
 				return
 			}
@@ -449,7 +452,12 @@
 			// branch. Basically, it means that we are no longer receiving
 			// requests due to a terminal error.
 			recvErr = nil // connection is now "closing"
-			if err != nil && err != io.EOF {
+			if err == io.EOF || err == io.ErrUnexpectedEOF {
+				// The client went away and we should stop processing
+				// requests, so that the client connection is closed
+				return
+			}
+			if err != nil {
 				logrus.WithError(err).Error("error receiving message")
 			}
 		case <-shutdown:
@@ -461,6 +469,12 @@
 var noopFunc = func() {}
 
 func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) {
+	if len(req.Metadata) > 0 {
+		md := MD{}
+		md.fromRequest(req)
+		ctx = WithMetadata(ctx, md)
+	}
+
 	cancel = noopFunc
 	if req.TimeoutNano == 0 {
 		return ctx, cancel
diff --git a/vendor/github.com/containerd/ttrpc/services.go b/vendor/github.com/containerd/ttrpc/services.go
index fe1cade..0eacfd7 100644
--- a/vendor/github.com/containerd/ttrpc/services.go
+++ b/vendor/github.com/containerd/ttrpc/services.go
@@ -37,12 +37,14 @@
 }
 
 type serviceSet struct {
-	services map[string]ServiceDesc
+	services    map[string]ServiceDesc
+	interceptor UnaryServerInterceptor
 }
 
-func newServiceSet() *serviceSet {
+func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet {
 	return &serviceSet{
-		services: make(map[string]ServiceDesc),
+		services:    make(map[string]ServiceDesc),
+		interceptor: interceptor,
 	}
 }
 
@@ -84,7 +86,11 @@
 		return nil
 	}
 
-	resp, err := method(ctx, unmarshal)
+	info := &UnaryServerInfo{
+		FullMethod: fullPath(serviceName, methodName),
+	}
+
+	resp, err := s.interceptor(ctx, unmarshal, info, method)
 	if err != nil {
 		return nil, err
 	}
@@ -146,5 +152,5 @@
 }
 
 func fullPath(service, method string) string {
-	return "/" + path.Join("/", service, method)
+	return "/" + path.Join(service, method)
 }
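
The fullPath change fixes a doubled leading slash: path.Join("/", service, method) already returns a rooted path, so prepending another "/" yielded "//service/method". A quick stdlib check:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println("/" + path.Join("/", "svc", "Method")) // //svc/Method (old form, doubled slash)
	fmt.Println("/" + path.Join("svc", "Method"))      // /svc/Method  (new form)
}
```
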
diff --git a/vendor/github.com/containerd/ttrpc/types.go b/vendor/github.com/containerd/ttrpc/types.go
index a6b3b81..9a1c19a 100644
--- a/vendor/github.com/containerd/ttrpc/types.go
+++ b/vendor/github.com/containerd/ttrpc/types.go
@@ -23,10 +23,11 @@
 )
 
 type Request struct {
-	Service     string `protobuf:"bytes,1,opt,name=service,proto3"`
-	Method      string `protobuf:"bytes,2,opt,name=method,proto3"`
-	Payload     []byte `protobuf:"bytes,3,opt,name=payload,proto3"`
-	TimeoutNano int64  `protobuf:"varint,4,opt,name=timeout_nano,proto3"`
+	Service     string      `protobuf:"bytes,1,opt,name=service,proto3"`
+	Method      string      `protobuf:"bytes,2,opt,name=method,proto3"`
+	Payload     []byte      `protobuf:"bytes,3,opt,name=payload,proto3"`
+	TimeoutNano int64       `protobuf:"varint,4,opt,name=timeout_nano,proto3"`
+	Metadata    []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"`
 }
 
 func (r *Request) Reset()         { *r = Request{} }
@@ -41,3 +42,22 @@
 func (r *Response) Reset()         { *r = Response{} }
 func (r *Response) String() string { return fmt.Sprintf("%+#v", r) }
 func (r *Response) ProtoMessage()  {}
+
+type StringList struct {
+	List []string `protobuf:"bytes,1,rep,name=list,proto3"`
+}
+
+func (r *StringList) Reset()         { *r = StringList{} }
+func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) }
+func (r *StringList) ProtoMessage()  {}
+
+func makeStringList(item ...string) StringList { return StringList{List: item} }
+
+type KeyValue struct {
+	Key   string `protobuf:"bytes,1,opt,name=key,proto3"`
+	Value string `protobuf:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *KeyValue) Reset()         { *m = KeyValue{} }
+func (*KeyValue) ProtoMessage()    {}
+func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) }
diff --git a/vendor/github.com/docker/swarmkit/manager/manager.go b/vendor/github.com/docker/swarmkit/manager/manager.go
index cba72c2..9112fb2 100644
--- a/vendor/github.com/docker/swarmkit/manager/manager.go
+++ b/vendor/github.com/docker/swarmkit/manager/manager.go
@@ -1224,8 +1224,12 @@
 			},
 			DriverConfig: &api.Driver{},
 			IPAM: &api.IPAMOptions{
-				Driver:  &api.Driver{},
-				Configs: []*api.IPAMConfig{},
+				Driver: &api.Driver{},
+				Configs: []*api.IPAMConfig{
+					{
+						Subnet: "10.255.0.0/16",
+					},
+				},
 			},
 		},
 	}
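
(For context: 10.255.0.0/16 is the subnet Docker has historically used for the default swarm ingress network; listing it explicitly in the predefined network spec makes that default visible instead of leaving the pool choice to the IPAM driver.)
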
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index f5eec67..afbc43d 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,42 +1,96 @@
 # gRPC-Go
 
-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
+[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
 
-The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.
+The Go implementation of [gRPC](https://grpc.io/): A high performance, open
+source, general RPC framework that puts mobile and HTTP/2 first. For more
+information see the [gRPC Quick Start:
+Go](https://grpc.io/docs/quickstart/go.html) guide.
 
 Installation
 ------------
 
-To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run:
+To install this package, you need to install Go and set up your Go workspace on
+your computer. The simplest way to install the library is to run:
 
 ```
 $ go get -u google.golang.org/grpc
 ```
 
+With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
+your source code and `go [build|run|test]` will automatically download the
+necessary dependencies ([Go modules
+ref](https://github.com/golang/go/wiki/Modules)).
+
+If you are trying to access grpc-go from within China, please see the
+[FAQ](#FAQ) below.
+
 Prerequisites
 -------------
-
 gRPC-Go requires Go 1.9 or later.
 
-Constraints
------------
-The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
-
 Documentation
 -------------
-See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
+- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
+  descriptions.
+- Documentation on specific topics can be found in the [Documentation
+  directory](Documentation/).
+- Examples can be found in the [examples directory](examples/).
 
 Performance
 -----------
-See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
+Performance benchmark data for grpc-go and other languages is maintained in
+[this
+dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
 
 Status
 ------
-General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages).
+General Availability [Google Cloud Platform Launch
+Stages](https://cloud.google.com/terms/launch-stages).
 
 FAQ
 ---
 
+#### I/O Timeout Errors
+
+The `golang.org` domain may be blocked in some countries.  `go get` usually
+produces an error like the following when this happens:
+
+```
+$ go get -u google.golang.org/grpc
+package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
+```
+
+To build Go code, there are several options:
+
+- Set up a VPN and access google.golang.org through that.
+
+- Without Go module support: `git clone` the repo manually:
+
+  ```
+  git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
+  ```
+
+  You will need to do the same for all of grpc's dependencies in `golang.org`,
+  e.g. `golang.org/x/net`.
+
+- With Go module support: it is possible to use the `replace` feature of `go
+  mod` to create aliases for golang.org packages.  In your project's directory:
+
+  ```
+  go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
+  go mod tidy
+  go mod vendor
+  go build -mod=vendor
+  ```
+
+  Again, this will need to be done for all transitive dependencies hosted on
+  golang.org as well.  Please refer to [this
+  issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
+  this concern.
+
 #### Compiling error, undefined: grpc.SupportPackageIsVersion
 
 Please update proto package, gRPC package and rebuild the proto files:
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
index a78e702..a8eb0f4 100644
--- a/vendor/google.golang.org/grpc/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -43,7 +43,7 @@
 
 // BalancerConfig specifies the configurations for Balancer.
 //
-// Deprecated: please use package balancer.
+// Deprecated: please use package balancer.  May be removed in a future 1.x release.
 type BalancerConfig struct {
 	// DialCreds is the transport credential the Balancer implementation can
 	// use to dial to a remote load balancer server. The Balancer implementations
@@ -57,7 +57,7 @@
 
 // BalancerGetOptions configures a Get call.
 //
-// Deprecated: please use package balancer.
+// Deprecated: please use package balancer.  May be removed in a future 1.x release.
 type BalancerGetOptions struct {
 	// BlockingWait specifies whether Get should block when there is no
 	// connected address.
@@ -66,7 +66,7 @@
 
 // Balancer chooses network addresses for RPCs.
 //
-// Deprecated: please use package balancer.
+// Deprecated: please use package balancer.  May be removed in a future 1.x release.
 type Balancer interface {
 	// Start does the initialization work to bootstrap a Balancer. For example,
 	// this function may start the name resolution and watch the updates. It will
@@ -120,7 +120,7 @@
 // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
 // the name resolution updates and updates the addresses available correspondingly.
 //
-// Deprecated: please use package balancer/roundrobin.
+// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
 func RoundRobin(r naming.Resolver) Balancer {
 	return &roundRobin{r: r}
 }
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index fafede2..c266f4e 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -22,6 +22,7 @@
 
 import (
 	"context"
+	"encoding/json"
 	"errors"
 	"net"
 	"strings"
@@ -31,6 +32,7 @@
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
 )
 
 var (
@@ -39,7 +41,10 @@
 )
 
 // Register registers the balancer builder to the balancer map. b.Name
-// (lowercased) will be used as the name registered with this builder.
+// (lowercased) will be used as the name registered with this builder.  If the
+// Builder implements ConfigParser, ParseConfig will be called when new service
+// configs are received by the resolver, and the result will be provided to the
+// Balancer in UpdateClientConnState.
 //
 // NOTE: this function must only be called during initialization time (i.e. in
 // an init() function), and is not thread-safe. If multiple Balancers are
@@ -138,6 +143,8 @@
 	ResolveNow(resolver.ResolveNowOption)
 
 	// Target returns the dial target for this ClientConn.
+	//
+	// Deprecated: Use the Target field in the BuildOptions instead.
 	Target() string
 }
 
@@ -155,6 +162,10 @@
 	Dialer func(context.Context, string) (net.Conn, error)
 	// ChannelzParentID is the entity parent's channelz unique identification number.
 	ChannelzParentID int64
+	// Target contains the parsed address info of the dial target. It is the same resolver.Target as
+	// passed to the resolver.
+	// See the documentation for the resolver.Target type for details about what it contains.
+	Target resolver.Target
 }
 
 // Builder creates a balancer.
@@ -166,6 +177,14 @@
 	Name() string
 }
 
+// ConfigParser parses load balancer configs.
+type ConfigParser interface {
+	// ParseConfig parses the JSON load balancer config provided into an
+	// internal form or returns an error if the config is invalid.  For future
+	// compatibility reasons, unknown fields in the config should be ignored.
+	ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
+}
+
 // PickOptions contains additional information for the Pick operation.
 type PickOptions struct {
 	// FullMethodName is the method name that NewClientStream() is called
@@ -264,7 +283,7 @@
 	// non-nil error to gRPC.
 	//
 	// Deprecated: if V2Balancer is implemented by the Balancer,
-	// UpdateResolverState will be called instead.
+	// UpdateClientConnState will be called instead.
 	HandleResolvedAddrs([]resolver.Address, error)
 	// Close closes the balancer. The balancer is not required to call
 	// ClientConn.RemoveSubConn for its existing SubConns.
@@ -277,14 +296,23 @@
 	// TODO: add last connection error
 }
 
+// ClientConnState describes the state of a ClientConn relevant to the
+// balancer.
+type ClientConnState struct {
+	ResolverState resolver.State
+	// The parsed load balancing configuration returned by the builder's
+	// ParseConfig method, if implemented.
+	BalancerConfig serviceconfig.LoadBalancingConfig
+}
+
 // V2Balancer is defined for documentation purposes.  If a Balancer also
-// implements V2Balancer, its UpdateResolverState method will be called instead
-// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of
-// HandleSubConnStateChange.
+// implements V2Balancer, its UpdateClientConnState method will be called
+// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
+// instead of HandleSubConnStateChange.
 type V2Balancer interface {
-	// UpdateResolverState is called by gRPC when the state of the resolver
+	// UpdateClientConnState is called by gRPC when the state of the ClientConn
 	// changes.
-	UpdateResolverState(resolver.State)
+	UpdateClientConnState(ClientConnState)
 	// UpdateSubConnState is called by gRPC when the state of a SubConn
 	// changes.
 	UpdateSubConnState(SubConn, SubConnState)
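
For reference, a balancer opts into the new callbacks by implementing V2Balancer alongside the legacy interface; gRPC type-asserts for V2Balancer and then calls UpdateClientConnState/UpdateSubConnState instead of the Handle* methods. A do-nothing sketch, assuming the signatures at this vendored revision:

```go
package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

// noopBalancer is an illustrative stub only. It satisfies the legacy
// balancer.Balancer interface plus the V2Balancer methods; when a balancer
// implements V2Balancer, gRPC invokes the Update* methods and skips Handle*.
type noopBalancer struct{}

func (noopBalancer) HandleSubConnStateChange(balancer.SubConn, connectivity.State) {}
func (noopBalancer) HandleResolvedAddrs([]resolver.Address, error)                 {}
func (noopBalancer) UpdateClientConnState(balancer.ClientConnState)                {}
func (noopBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState)    {}
func (noopBalancer) Close()                                                        {}
```
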
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index c5a51bd..1af88f0 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -70,13 +70,15 @@
 	panic("not implemented")
 }
 
-func (b *baseBalancer) UpdateResolverState(s resolver.State) {
-	// TODO: handle s.Err (log if not nil) once implemented.
-	// TODO: handle s.ServiceConfig?
-	grpclog.Infoln("base.baseBalancer: got new resolver state: ", s)
+func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
+	// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
+	// TODO: handle s.ResolverState.ServiceConfig?
+	if grpclog.V(2) {
+		grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
+	}
 	// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
 	addrsSet := make(map[resolver.Address]struct{})
-	for _, a := range s.Addresses {
+	for _, a := range s.ResolverState.Addresses {
 		addrsSet[a] = struct{}{}
 		if _, ok := b.subConns[a]; !ok {
 			// a is a new address (not existing in b.subConns).
@@ -127,10 +129,14 @@
 
 func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
 	s := state.ConnectivityState
-	grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	if grpclog.V(2) {
+		grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	}
 	oldS, ok := b.scStates[sc]
 	if !ok {
-		grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		if grpclog.V(2) {
+			grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		}
 		return
 	}
 	b.scStates[sc] = s
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index bc965f0..8df4095 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -88,7 +88,7 @@
 	cc               *ClientConn
 	balancer         balancer.Balancer
 	stateChangeQueue *scStateUpdateBuffer
-	resolverUpdateCh chan *resolver.State
+	ccUpdateCh       chan *balancer.ClientConnState
 	done             chan struct{}
 
 	mu       sync.Mutex
@@ -99,7 +99,7 @@
 	ccb := &ccBalancerWrapper{
 		cc:               cc,
 		stateChangeQueue: newSCStateUpdateBuffer(),
-		resolverUpdateCh: make(chan *resolver.State, 1),
+		ccUpdateCh:       make(chan *balancer.ClientConnState, 1),
 		done:             make(chan struct{}),
 		subConns:         make(map[*acBalancerWrapper]struct{}),
 	}
@@ -126,7 +126,7 @@
 			} else {
 				ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
 			}
-		case s := <-ccb.resolverUpdateCh:
+		case s := <-ccb.ccUpdateCh:
 			select {
 			case <-ccb.done:
 				ccb.balancer.Close()
@@ -134,9 +134,9 @@
 			default:
 			}
 			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-				ub.UpdateResolverState(*s)
+				ub.UpdateClientConnState(*s)
 			} else {
-				ccb.balancer.HandleResolvedAddrs(s.Addresses, nil)
+				ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
 			}
 		case <-ccb.done:
 		}
@@ -151,9 +151,11 @@
 			for acbw := range scs {
 				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 			}
+			ccb.UpdateBalancerState(connectivity.Connecting, nil)
 			return
 		default:
 		}
+		ccb.cc.firstResolveEvent.Fire()
 	}
 }
 
@@ -178,9 +180,10 @@
 	})
 }
 
-func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) {
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
 	if ccb.cc.curBalancerName != grpclbName {
 		// Filter any grpclb addresses since we don't have the grpclb balancer.
+		s := &ccs.ResolverState
 		for i := 0; i < len(s.Addresses); {
 			if s.Addresses[i].Type == resolver.GRPCLB {
 				copy(s.Addresses[i:], s.Addresses[i+1:])
@@ -191,10 +194,10 @@
 		}
 	}
 	select {
-	case <-ccb.resolverUpdateCh:
+	case <-ccb.ccUpdateCh:
 	default:
 	}
-	ccb.resolverUpdateCh <- &s
+	ccb.ccUpdateCh <- ccs
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
index 29bda63..66e9a44 100644
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
@@ -20,7 +20,6 @@
 
 import (
 	"context"
-	"strings"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
@@ -34,13 +33,7 @@
 }
 
 func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
-	targetAddr := cc.Target()
-	targetSplitted := strings.Split(targetAddr, ":///")
-	if len(targetSplitted) >= 2 {
-		targetAddr = targetSplitted[1]
-	}
-
-	bwb.b.Start(targetAddr, BalancerConfig{
+	bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
 		DialCreds: opts.DialCreds,
 		Dialer:    opts.Dialer,
 	})
@@ -49,7 +42,7 @@
 		balancer:   bwb.b,
 		pickfirst:  pickfirst,
 		cc:         cc,
-		targetAddr: targetAddr,
+		targetAddr: opts.Target.Endpoint,
 		startCh:    make(chan struct{}),
 		conns:      make(map[resolver.Address]balancer.SubConn),
 		connSt:     make(map[balancer.SubConn]*scState),
@@ -120,7 +113,7 @@
 	}
 
 	for addrs := range notifyCh {
-		grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
+		grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
 		if bw.pickfirst {
 			var (
 				oldA  resolver.Address
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index bd2d2b3..a7643df 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -38,13 +38,13 @@
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/channelz"
-	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
 	_ "google.golang.org/grpc/resolver/dns"         // To register dns resolver.
 	_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
+	"google.golang.org/grpc/serviceconfig"
 	"google.golang.org/grpc/status"
 )
 
@@ -137,6 +137,9 @@
 		opt.apply(&cc.dopts)
 	}
 
+	chainUnaryClientInterceptors(cc)
+	chainStreamClientInterceptors(cc)
+
 	defer func() {
 		if err != nil {
 			cc.Close()
@@ -290,6 +293,7 @@
 		CredsBundle:      cc.dopts.copts.CredsBundle,
 		Dialer:           cc.dopts.copts.Dialer,
 		ChannelzParentID: cc.channelzID,
+		Target:           cc.parsedTarget,
 	}
 
 	// Build the resolver.
@@ -327,6 +331,68 @@
 	return cc, nil
 }
 
+// chainUnaryClientInterceptors chains all unary client interceptors into one.
+func chainUnaryClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainUnaryInts
+	// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.unaryInt != nil {
+		interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
+	}
+	var chainedInt UnaryClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
+		}
+	}
+	cc.dopts.unaryInt = chainedInt
+}
+
+// getChainUnaryInvoker recursively generates the chained unary invoker.
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
+	if curr == len(interceptors)-1 {
+		return finalInvoker
+	}
+	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
+	}
+}
+
+// chainStreamClientInterceptors chains all stream client interceptors into one.
+func chainStreamClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainStreamInts
+	// Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.streamInt != nil {
+		interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
+	}
+	var chainedInt StreamClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
+			return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
+		}
+	}
+	cc.dopts.streamInt = chainedInt
+}
+
+// getChainStreamer recursively generates the chained client stream constructor.
+func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
+	if curr == len(interceptors)-1 {
+		return finalStreamer
+	}
+	return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+		return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
+	}
+}
+
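
These chaining helpers back the WithChainUnaryInterceptor and WithChainStreamInterceptor dial options (which populate dopts.chainUnaryInts/chainStreamInts at this grpc-go revision). A hedged usage sketch for the unary side:

```go
package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// dialWithChain dials target with two chained unary interceptors; they run
// in the order given, each wrapping the next via getChainUnaryInvoker above.
func dialWithChain(target string) (*grpc.ClientConn, error) {
	logging := func(tag string) grpc.UnaryClientInterceptor {
		return func(ctx context.Context, method string, req, reply interface{},
			cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
			log.Printf("[%s] -> %s", tag, method)
			return invoker(ctx, method, req, reply, cc, opts...)
		}
	}
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithChainUnaryInterceptor(logging("outer"), logging("inner")),
	)
}
```
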
 // connectivityStateManager keeps the connectivity.State of ClientConn.
 // This struct will eventually be exported so the balancers can access it.
 type connectivityStateManager struct {
@@ -466,24 +532,6 @@
 	}
 }
 
-// gRPC should resort to default service config when:
-// * resolver service config is disabled
-// * or, resolver does not return a service config or returns an invalid one.
-func (cc *ClientConn) fallbackToDefaultServiceConfig(sc string) bool {
-	if cc.dopts.disableServiceConfig {
-		return true
-	}
-	// The logic below is temporary, will be removed once we change the resolver.State ServiceConfig field type.
-	// Right now, we assume that empty service config string means resolver does not return a config.
-	if sc == "" {
-		return true
-	}
-	// TODO: the logic below is temporary. Once we finish the logic to validate service config
-	// in resolver, we will replace the logic below.
-	_, err := parseServiceConfig(sc)
-	return err != nil
-}
-
 func (cc *ClientConn) updateResolverState(s resolver.State) error {
 	cc.mu.Lock()
 	defer cc.mu.Unlock()
@@ -494,54 +542,47 @@
 		return nil
 	}
 
-	if cc.fallbackToDefaultServiceConfig(s.ServiceConfig) {
+	if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
 		if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
 			cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
 		}
-	} else {
-		// TODO: the parsing logic below will be moved inside resolver.
-		sc, err := parseServiceConfig(s.ServiceConfig)
-		if err != nil {
-			return err
-		}
-		if cc.sc == nil || cc.sc.rawJSONString != s.ServiceConfig {
-			cc.applyServiceConfig(sc)
-		}
+	} else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
+		cc.applyServiceConfig(sc)
 	}
 
-	// update the service config that will be sent to balancer.
-	if cc.sc != nil {
-		s.ServiceConfig = cc.sc.rawJSONString
-	}
-
+	var balCfg serviceconfig.LoadBalancingConfig
 	if cc.dopts.balancerBuilder == nil {
 		// Only look at balancer types and switch balancer if balancer dial
 		// option is not set.
-		var isGRPCLB bool
-		for _, a := range s.Addresses {
-			if a.Type == resolver.GRPCLB {
-				isGRPCLB = true
-				break
-			}
-		}
 		var newBalancerName string
-		// TODO: use new loadBalancerConfig field with appropriate priority.
-		if isGRPCLB {
-			newBalancerName = grpclbName
-		} else if cc.sc != nil && cc.sc.LB != nil {
-			newBalancerName = *cc.sc.LB
+		if cc.sc != nil && cc.sc.lbConfig != nil {
+			newBalancerName = cc.sc.lbConfig.name
+			balCfg = cc.sc.lbConfig.cfg
 		} else {
-			newBalancerName = PickFirstBalancerName
+			var isGRPCLB bool
+			for _, a := range s.Addresses {
+				if a.Type == resolver.GRPCLB {
+					isGRPCLB = true
+					break
+				}
+			}
+			if isGRPCLB {
+				newBalancerName = grpclbName
+			} else if cc.sc != nil && cc.sc.LB != nil {
+				newBalancerName = *cc.sc.LB
+			} else {
+				newBalancerName = PickFirstBalancerName
+			}
 		}
 		cc.switchBalancer(newBalancerName)
 	} else if cc.balancerWrapper == nil {
 		// Balancer dial option was set, and this is the first time handling
 		// resolved addresses. Build a balancer with dopts.balancerBuilder.
+		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
 		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
 	}
 
-	cc.balancerWrapper.updateResolverState(s)
-	cc.firstResolveEvent.Fire()
+	cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
 	return nil
 }
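
The rewritten updateResolverState picks the balancer with a clear precedence: a load-balancing config from the service config wins, then grpclb if any resolved address has type GRPCLB, then the deprecated top-level LB field, then pick_first. A standalone sketch of that precedence (simplified types, illustrative only):

package main

import "fmt"

type address struct{ grpclb bool }

// pickBalancer mirrors the priority order in updateResolverState.
func pickBalancer(lbConfigName string, addrs []address, legacyLB *string) string {
	if lbConfigName != "" {
		return lbConfigName // service config loadBalancingConfig wins
	}
	for _, a := range addrs {
		if a.grpclb {
			return "grpclb" // any GRPCLB-typed address forces grpclb
		}
	}
	if legacyLB != nil {
		return *legacyLB // deprecated top-level LB field
	}
	return "pick_first" // default
}

func main() {
	rr := "round_robin"
	fmt.Println(pickBalancer("", []address{{grpclb: true}}, &rr)) // grpclb
	fmt.Println(pickBalancer("", nil, &rr))                       // round_robin
	fmt.Println(pickBalancer("", nil, nil))                       // pick_first
}
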
 
@@ -554,7 +595,7 @@
 //
 // Caller must hold cc.mu.
 func (cc *ClientConn) switchBalancer(name string) {
-	if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
+	if strings.EqualFold(cc.curBalancerName, name) {
 		return
 	}
 
@@ -693,6 +734,8 @@
 		ac.mu.Unlock()
 		return nil
 	}
+	// Update connectivity state within the lock to prevent subsequent or
+	// concurrent calls from resetting the transport more than once.
 	ac.updateConnectivityState(connectivity.Connecting)
 	ac.mu.Unlock()
 
@@ -703,7 +746,16 @@
 
 // tryUpdateAddrs tries to update ac.addrs with the new addresses list.
 //
-// It checks whether current connected address of ac is in the new addrs list.
+// If ac is Connecting, it returns false. The caller should tear down the ac and
+// create a new one. Note that the backoff will be reset when this happens.
+//
+// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
+// addresses will be picked up by retry in the next iteration after backoff.
+//
+// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
+//
+// If ac is Ready, it checks whether current connected address of ac is in the
+// new addrs list.
 //  - If true, it updates ac.addrs and returns true. The ac will keep using
 //    the existing connection.
 //  - If false, it does nothing and returns false.
@@ -711,17 +763,18 @@
 	ac.mu.Lock()
 	defer ac.mu.Unlock()
 	grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
-	if ac.state == connectivity.Shutdown {
+	if ac.state == connectivity.Shutdown ||
+		ac.state == connectivity.TransientFailure ||
+		ac.state == connectivity.Idle {
 		ac.addrs = addrs
 		return true
 	}
 
-	// Unless we're busy reconnecting already, let's reconnect from the top of
-	// the list.
-	if ac.state != connectivity.Ready {
+	if ac.state == connectivity.Connecting {
 		return false
 	}
 
+	// ac.state is Ready, try to find the connected address.
 	var curAddrFound bool
 	for _, a := range addrs {
 		if reflect.DeepEqual(ac.curAddr, a) {
@@ -970,6 +1023,9 @@
 		// The spec doesn't mention what should be done for multiple addresses.
 		// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
 		connectDeadline := time.Now().Add(dialDuration)
+
+		ac.updateConnectivityState(connectivity.Connecting)
+		ac.transport = nil
 		ac.mu.Unlock()
 
 		newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
@@ -1004,55 +1060,32 @@
 
 		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
-			newTr.Close()
 			ac.mu.Unlock()
+			newTr.Close()
 			return
 		}
 		ac.curAddr = addr
 		ac.transport = newTr
 		ac.backoffIdx = 0
 
-		healthCheckConfig := ac.cc.healthCheckConfig()
-		// LB channel health checking is only enabled when all the four requirements below are met:
-		// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption,
-		// 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package,
-		// 3. a service config with non-empty healthCheckConfig field is provided,
-		// 4. the current load balancer allows it.
 		hctx, hcancel := context.WithCancel(ac.ctx)
-		healthcheckManagingState := false
-		if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled {
-			if ac.cc.dopts.healthCheckFunc == nil {
-				// TODO: add a link to the health check doc in the error message.
-				grpclog.Error("the client side LB channel health check function has not been set.")
-			} else {
-				// TODO(deklerk) refactor to just return transport
-				go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName)
-				healthcheckManagingState = true
-			}
-		}
-		if !healthcheckManagingState {
-			ac.updateConnectivityState(connectivity.Ready)
-		}
+		ac.startHealthCheck(hctx)
 		ac.mu.Unlock()
 
 		// Block until the created transport is down. And when this happens,
 		// we restart from the top of the addr list.
 		<-reconnect.Done()
 		hcancel()
-
-		// Need to reconnect after a READY, the addrConn enters
-		// TRANSIENT_FAILURE.
+		// restart connecting - the top of the loop will set state to
+		// CONNECTING.  This is against the current connectivity semantics doc,
+		// however it allows for graceful behavior for RPCs not yet dispatched
+		// - unfortunate timing would otherwise lead to the RPC failing even
+		// though the TRANSIENT_FAILURE state (called for by the doc) would be
+		// instantaneous.
 		//
-		// This will set addrConn to TRANSIENT_FAILURE for a very short period
-		// of time, and turns CONNECTING. It seems reasonable to skip this, but
-		// READY-CONNECTING is not a valid transition.
-		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			return
-		}
-		ac.updateConnectivityState(connectivity.TransientFailure)
-		ac.mu.Unlock()
+		// Ideally we should transition to Idle here and block until there is
+		// RPC activity that leads to the balancer requesting a reconnect of
+		// the associated SubConn.
 	}
 }
 
@@ -1066,8 +1099,6 @@
 			ac.mu.Unlock()
 			return nil, resolver.Address{}, nil, errConnClosing
 		}
-		ac.updateConnectivityState(connectivity.Connecting)
-		ac.transport = nil
 
 		ac.cc.mu.RLock()
 		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
@@ -1111,14 +1142,35 @@
 		Authority: ac.cc.authority,
 	}
 
+	once := sync.Once{}
 	onGoAway := func(r transport.GoAwayReason) {
 		ac.mu.Lock()
 		ac.adjustParams(r)
+		once.Do(func() {
+			if ac.state == connectivity.Ready {
+				// Prevent this SubConn from being used for new RPCs by setting its
+				// state to Connecting.
+				//
+				// TODO: this should be Idle when grpc-go properly supports it.
+				ac.updateConnectivityState(connectivity.Connecting)
+			}
+		})
 		ac.mu.Unlock()
 		reconnect.Fire()
 	}
 
 	onClose := func() {
+		ac.mu.Lock()
+		once.Do(func() {
+			if ac.state == connectivity.Ready {
+				// Prevent this SubConn from being used for new RPCs by setting its
+				// state to Connecting.
+				//
+				// TODO: this should be Idle when grpc-go properly supports it.
+				ac.updateConnectivityState(connectivity.Connecting)
+			}
+		})
+		ac.mu.Unlock()
 		close(onCloseCalled)
 		reconnect.Fire()
 	}
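
onGoAway and onClose can both fire for the same transport, possibly concurrently; the shared sync.Once above guarantees the Ready-to-Connecting demotion runs at most once regardless of which callback wins. A minimal sketch of the callbacks-share-a-Once pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	var mu sync.Mutex
	state := "READY"

	demote := func(who string) {
		mu.Lock()
		once.Do(func() {
			if state == "READY" {
				state = "CONNECTING"
				fmt.Println(who, "demoted the subconn")
			}
		})
		mu.Unlock()
	}

	var wg sync.WaitGroup
	for _, cb := range []string{"onGoAway", "onClose"} {
		wg.Add(1)
		go func(who string) { defer wg.Done(); demote(who) }(cb)
	}
	wg.Wait()
	fmt.Println("state:", state) // CONNECTING; the transition ran exactly once
}
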
@@ -1140,60 +1192,99 @@
 		return nil, nil, err
 	}
 
-	if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn {
-		select {
-		case <-time.After(connectDeadline.Sub(time.Now())):
-			// We didn't get the preface in time.
-			newTr.Close()
-			grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
-			return nil, nil, errors.New("timed out waiting for server handshake")
-		case <-prefaceReceived:
-			// We got the preface - huzzah! things are good.
-		case <-onCloseCalled:
-			// The transport has already closed - noop.
-			return nil, nil, errors.New("connection closed")
-			// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
-		}
+	select {
+	case <-time.After(connectDeadline.Sub(time.Now())):
+		// We didn't get the preface in time.
+		newTr.Close()
+		grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
+		return nil, nil, errors.New("timed out waiting for server handshake")
+	case <-prefaceReceived:
+		// We got the preface - huzzah! things are good.
+	case <-onCloseCalled:
+		// The transport has already closed - noop.
+		return nil, nil, errors.New("connection closed")
+		// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
 	}
 	return newTr, reconnect, nil
 }
 
-func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) {
-	// Set up the health check helper functions
-	newStream := func() (interface{}, error) {
-		return ac.newClientStream(ctx, &StreamDesc{ServerStreams: true}, "/grpc.health.v1.Health/Watch", newTr)
+// startHealthCheck starts the health checking stream (RPC) to watch the health
+// stats of this connection if health checking is requested and configured.
+//
+// LB channel health checking is enabled when all requirements below are met:
+// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
+// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
+// 3. a service config with non-empty healthCheckConfig field is provided
+// 4. the load balancer requests it
+//
+// It sets addrConn to READY if the health checking stream is not started.
+//
+// Caller must hold ac.mu.
+func (ac *addrConn) startHealthCheck(ctx context.Context) {
+	var healthcheckManagingState bool
+	defer func() {
+		if !healthcheckManagingState {
+			ac.updateConnectivityState(connectivity.Ready)
+		}
+	}()
+
+	if ac.cc.dopts.disableHealthCheck {
+		return
 	}
-	firstReady := true
-	reportHealth := func(ok bool) {
+	healthCheckConfig := ac.cc.healthCheckConfig()
+	if healthCheckConfig == nil {
+		return
+	}
+	if !ac.scopts.HealthCheckEnabled {
+		return
+	}
+	healthCheckFunc := ac.cc.dopts.healthCheckFunc
+	if healthCheckFunc == nil {
+		// The health package is not imported to set health check function.
+		//
+		// TODO: add a link to the health check doc in the error message.
+		grpclog.Error("Health check is requested but health check function is not set.")
+		return
+	}
+
+	healthcheckManagingState = true
+
+	// Set up the health check helper functions.
+	currentTr := ac.transport
+	newStream := func(method string) (interface{}, error) {
+		ac.mu.Lock()
+		if ac.transport != currentTr {
+			ac.mu.Unlock()
+			return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
+		}
+		ac.mu.Unlock()
+		return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
+	}
+	setConnectivityState := func(s connectivity.State) {
 		ac.mu.Lock()
 		defer ac.mu.Unlock()
-		if ac.transport != newTr {
+		if ac.transport != currentTr {
 			return
 		}
-		if ok {
-			if firstReady {
-				firstReady = false
-				ac.curAddr = addr
-			}
-			ac.updateConnectivityState(connectivity.Ready)
-		} else {
-			ac.updateConnectivityState(connectivity.TransientFailure)
-		}
+		ac.updateConnectivityState(s)
 	}
-	err := ac.cc.dopts.healthCheckFunc(ctx, newStream, reportHealth, serviceName)
-	if err != nil {
-		if status.Code(err) == codes.Unimplemented {
-			if channelz.IsOn() {
-				channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-					Desc:     "Subchannel health check is unimplemented at server side, thus health check is disabled",
-					Severity: channelz.CtError,
-				})
+	// Start the health checking stream.
+	go func() {
+		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
+		if err != nil {
+			if status.Code(err) == codes.Unimplemented {
+				if channelz.IsOn() {
+					channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+						Desc:     "Subchannel health check is unimplemented at server side, thus health check is disabled",
+						Severity: channelz.CtError,
+					})
+				}
+				grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
+			} else {
+				grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
 			}
-			grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
-		} else {
-			grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
 		}
-	}
+	}()
 }
 
 func (ac *addrConn) resetConnectBackoff() {
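
Per the new startHealthCheck doc comment, all four requirements must hold before the health checking stream starts. A hedged sketch of a client that satisfies them (target address, service name, and credentials are placeholders):

package main

import (
	"google.golang.org/grpc"
	// Requirement 2: blank-import the health package so its init sets
	// internal.HealthCheckFunc.
	_ "google.golang.org/grpc/health"
)

func main() {
	// Requirement 1 is met by simply not passing WithDisableHealthCheck.
	// Requirement 3: a service config with a non-empty healthCheckConfig.
	// Requirement 4 depends on the balancer; round_robin enables it.
	cc, err := grpc.Dial("example.com:443",
		grpc.WithInsecure(), // placeholder; use real transport credentials
		grpc.WithDefaultServiceConfig(`{
			"loadBalancingPolicy": "round_robin",
			"healthCheckConfig": {"serviceName": "my.service.Name"}
		}`),
	)
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}
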
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index d9b9d57..0273883 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -132,7 +132,8 @@
 
 	// Unavailable indicates the service is currently unavailable.
 	// This is most likely a transient condition and may be corrected

-	// by retrying with a backoff.
+	// by retrying with a backoff. Note that it is not always safe to retry
+	// non-idempotent operations.
 	//
 	// See litmus test above for deciding between FailedPrecondition,
 	// Aborted, and Unavailable.
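
The amended Unavailable comment warns that retrying is not always safe for non-idempotent operations. A hedged sketch of a caller-side retry helper that honors the warning (CallWithRetry and its parameters are illustrative, not a grpc-go API):

package retry

import (
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// CallWithRetry retries do() on Unavailable, but only when the caller has
// declared the operation idempotent. Backoff here is deliberately crude.
func CallWithRetry(do func() error, idempotent bool, maxAttempts int) error {
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err = do(); err == nil {
			return nil
		}
		if status.Code(err) != codes.Unavailable || !idempotent {
			return err // not safe (or not useful) to retry
		}
		time.Sleep(time.Duration(attempt+1) * 100 * time.Millisecond)
	}
	return err
}
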
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 88aff94..8ea3d4a 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -278,24 +278,22 @@
 // TLSChannelzSecurityValue defines the struct that TLS protocol should return
 // from GetSecurityValue(), containing security info like cipher and certificate used.
 type TLSChannelzSecurityValue struct {
+	ChannelzSecurityValue
 	StandardName      string
 	LocalCertificate  []byte
 	RemoteCertificate []byte
 }
 
-func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
-
 // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
 // from GetSecurityValue(), which contains protocol specific security info. Note
 // the Value field will be sent to users of channelz requesting channel info, and
 // thus sensitive info should better be avoided.
 type OtherChannelzSecurityValue struct {
+	ChannelzSecurityValue
 	Name  string
 	Value proto.Message
 }
 
-func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
-
 var cipherSuiteLookup = map[uint16]string{
 	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
 	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
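
The credentials change swaps the unexported isChannelzSecurityValue() methods for an embedded ChannelzSecurityValue interface: embedding promotes the marker method, so the structs still satisfy the interface without declaring it themselves. A standalone sketch of the embedded-marker-interface pattern:

package main

import "fmt"

type securityValue interface{ isSecurityValue() }

// tlsValue satisfies securityValue by embedding the interface itself: the
// embedded interface's method set is promoted, so no explicit method is
// needed. (Calling the promoted method on a zero value would panic, but
// channelz only uses the interface as a type marker.)
type tlsValue struct {
	securityValue
	StandardName string
}

func main() {
	var v securityValue = tlsValue{StandardName: "TLS_AES_128_GCM_SHA256"}
	fmt.Printf("%T is a securityValue\n", v)
}
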
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index e114fec..e8f34d0 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -39,8 +39,12 @@
 // dialOptions configure a Dial call. dialOptions are set by the DialOption
 // values passed to Dial.
 type dialOptions struct {
-	unaryInt    UnaryClientInterceptor
-	streamInt   StreamClientInterceptor
+	unaryInt  UnaryClientInterceptor
+	streamInt StreamClientInterceptor
+
+	chainUnaryInts  []UnaryClientInterceptor
+	chainStreamInts []StreamClientInterceptor
+
 	cp          Compressor
 	dc          Decompressor
 	bs          backoff.Strategy
@@ -56,7 +60,6 @@
 	balancerBuilder balancer.Builder
 	// This is to support grpclb.
 	resolverBuilder             resolver.Builder
-	reqHandshake                envconfig.RequireHandshakeSetting
 	channelzParentID            int64
 	disableServiceConfig        bool
 	disableRetry                bool
@@ -96,17 +99,6 @@
 	}
 }
 
-// WithWaitForHandshake blocks until the initial settings frame is received from
-// the server before assigning RPCs to the connection.
-//
-// Deprecated: this is the default behavior, and this option will be removed
-// after the 1.18 release.
-func WithWaitForHandshake() DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.reqHandshake = envconfig.RequireHandshakeOn
-	})
-}
-
 // WithWriteBufferSize determines how much data can be batched before doing a
 // write on the wire. The corresponding memory allocation for this buffer will
 // be twice the size to keep syscalls low. The default value for this buffer is
@@ -152,7 +144,8 @@
 // WithMaxMsgSize returns a DialOption which sets the maximum message size the
 // client can receive.
 //
-// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
+// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.  Will
+// be supported throughout 1.x.
 func WithMaxMsgSize(s int) DialOption {
 	return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
 }
@@ -168,7 +161,8 @@
 // WithCodec returns a DialOption which sets a codec for message marshaling and
 // unmarshaling.
 //
-// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.
+// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.  Will be
+// supported throughout 1.x.
 func WithCodec(c Codec) DialOption {
 	return WithDefaultCallOptions(CallCustomCodec(c))
 }
@@ -177,7 +171,7 @@
 // message compression. It has lower priority than the compressor set by the
 // UseCompressor CallOption.
 //
-// Deprecated: use UseCompressor instead.
+// Deprecated: use UseCompressor instead.  Will be supported throughout 1.x.
 func WithCompressor(cp Compressor) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.cp = cp
@@ -192,7 +186,8 @@
 // message.  If no compressor is registered for the encoding, an Unimplemented
 // status error will be returned.
 //
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead.  Will be supported
+// throughout 1.x.
 func WithDecompressor(dc Decompressor) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.dc = dc
@@ -203,7 +198,7 @@
 // Name resolver will be ignored if this DialOption is specified.
 //
 // Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName.
+// WithBalancerName.  Will be removed in a future 1.x release.
 func WithBalancer(b Balancer) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.balancerBuilder = &balancerWrapperBuilder{
@@ -219,7 +214,8 @@
 // The balancer cannot be overridden by balancer option specified by service
 // config.
 //
-// This is an EXPERIMENTAL API.
+// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
+// instead.  Will be removed in a future 1.x release.
 func WithBalancerName(balancerName string) DialOption {
 	builder := balancer.Get(balancerName)
 	if builder == nil {
@@ -240,9 +236,10 @@
 // WithServiceConfig returns a DialOption which has a channel to read the
 // service configuration.
 //
-// Deprecated: service config should be received through name resolver, as
-// specified here.
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// Deprecated: service config should be received through name resolver or via
+// WithDefaultServiceConfig, as specified at
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md.  Will be
+// removed in a future 1.x release.
 func WithServiceConfig(c <-chan ServiceConfig) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.scChan = c
@@ -325,7 +322,8 @@
 // WithTimeout returns a DialOption that configures a timeout for dialing a
 // ClientConn initially. This is valid if and only if WithBlock() is present.
 //
-// Deprecated: use DialContext and context.WithTimeout instead.
+// Deprecated: use DialContext and context.WithTimeout instead.  Will be
+// supported throughout 1.x.
 func WithTimeout(d time.Duration) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.timeout = d
@@ -352,7 +350,8 @@
 // is returned by f, gRPC checks the error's Temporary() method to decide if it
 // should try to reconnect to the network address.
 //
-// Deprecated: use WithContextDialer instead
+// Deprecated: use WithContextDialer instead.  Will be supported throughout
+// 1.x.
 func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
 	return WithContextDialer(
 		func(ctx context.Context, addr string) (net.Conn, error) {
@@ -414,6 +413,17 @@
 	})
 }
 
+// WithChainUnaryInterceptor returns a DialOption that specifies the chained
+// interceptor for unary RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithUnaryInterceptor will always be prepended to the chain.
+func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
+
 // WithStreamInterceptor returns a DialOption that specifies the interceptor for
 // streaming RPCs.
 func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
@@ -422,6 +432,17 @@
 	})
 }
 
+// WithChainStreamInterceptor returns a DialOption that specifies the chained
+// interceptor for streaming RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithStreamInterceptor will always be prepended to the chain.
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
 // WithAuthority returns a DialOption that specifies the value to be used as the
 // :authority pseudo-header. This value only works with WithInsecure and has no
 // effect if TransportCredentials are present.
@@ -440,12 +461,12 @@
 	})
 }
 
-// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
+// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
 // service config provided by the resolver and provides a hint to the resolver
 // to not fetch service configs.
 //
-// Note that, this dial option only disables service config from resolver. If
-// default service config is provided, grpc will use the default service config.
+// Note that this dial option only disables service config from resolver. If
+// default service config is provided, gRPC will use the default service config.
 func WithDisableServiceConfig() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.disableServiceConfig = true
@@ -454,8 +475,10 @@
 
 // WithDefaultServiceConfig returns a DialOption that configures the default
 // service config, which will be used in cases where:
-// 1. WithDisableServiceConfig is called.
-// 2. Resolver does not return service config or if the resolver gets and invalid config.
+//
+// 1. WithDisableServiceConfig is also used.
+// 2. Resolver does not return a service config, or the resolver returns an
+//    invalid service config.
 //
 // This API is EXPERIMENTAL.
 func WithDefaultServiceConfig(s string) DialOption {
@@ -511,7 +534,6 @@
 func defaultDialOptions() dialOptions {
 	return dialOptions{
 		disableRetry:    !envconfig.Retry,
-		reqHandshake:    envconfig.RequireHandshake,
 		healthCheckFunc: internal.HealthCheckFunc,
 		copts: transport.ConnectOptions{
 			WriteBufferSize: defaultWriteBufSize,
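
A hedged sketch of dialing with the new chained-interceptor option and a default service config together (target, interceptor bodies, and policy are placeholders):

package main

import (
	"context"

	"google.golang.org/grpc"
)

// Two placeholder interceptors; real ones would log, trace, add metadata, etc.
func first(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	return invoker(ctx, method, req, reply, cc, opts...)
}

func second(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	return invoker(ctx, method, req, reply, cc, opts...)
}

func main() {
	cc, err := grpc.Dial("example.com:443", // placeholder target
		grpc.WithInsecure(), // placeholder; use real transport credentials
		// first runs outermost, then second, then the actual invoker; anything
		// set via WithUnaryInterceptor would be prepended ahead of these.
		grpc.WithChainUnaryInterceptor(first, second),
		// Used only when the resolver returns no (or an invalid) service config.
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}
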
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
index 9f3ef3a..c1a8340 100644
--- a/vendor/google.golang.org/grpc/go.mod
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -7,13 +7,13 @@
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/mock v1.1.1
 	github.com/golang/protobuf v1.2.0
+	github.com/google/go-cmp v0.2.0
 	golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
 	golang.org/x/net v0.0.0-20190311183353-d8887717615a
 	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
-	golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
 	golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
-	golang.org/x/tools v0.0.0-20190311212946-11955173bddd
+	golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
 	google.golang.org/appengine v1.1.0 // indirect
 	google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
-	honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099
+	honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
 )
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go
index e15f04c..b43746e 100644
--- a/vendor/google.golang.org/grpc/health/client.go
+++ b/vendor/google.golang.org/grpc/health/client.go
@@ -26,6 +26,7 @@
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
 	healthpb "google.golang.org/grpc/health/grpc_health_v1"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/backoff"
@@ -51,7 +52,11 @@
 	internal.HealthCheckFunc = clientHealthCheck
 }
 
-func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error {
+const healthCheckMethod = "/grpc.health.v1.Health/Watch"
+
+// This function implements the protocol defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
 	tryCnt := 0
 
 retryConnection:
@@ -65,7 +70,8 @@
 		if ctx.Err() != nil {
 			return nil
 		}
-		rawS, err := newStream()
+		setConnectivityState(connectivity.Connecting)
+		rawS, err := newStream(healthCheckMethod)
 		if err != nil {
 			continue retryConnection
 		}
@@ -73,7 +79,7 @@
 		s, ok := rawS.(grpc.ClientStream)
 		// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
 		if !ok {
-			reportHealth(true)
+			setConnectivityState(connectivity.Ready)
 			return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
 		}
 
@@ -89,19 +95,23 @@
 
 			// Reports healthy for the LBing purposes if health check is not implemented in the server.
 			if status.Code(err) == codes.Unimplemented {
-				reportHealth(true)
+				setConnectivityState(connectivity.Ready)
 				return err
 			}
 
 			// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
 			if err != nil {
-				reportHealth(false)
+				setConnectivityState(connectivity.TransientFailure)
 				continue retryConnection
 			}
 
 			// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
 			tryCnt = 0
-			reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING)
+			if resp.Status == healthpb.HealthCheckResponse_SERVING {
+				setConnectivityState(connectivity.Ready)
+			} else {
+				setConnectivityState(connectivity.TransientFailure)
+			}
 		}
 	}
 }
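
The client protocol above calls the server's /grpc.health.v1.Health/Watch method; for reference, a hedged sketch of the matching server-side registration (listen address and service name are placeholders):

package main

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder address
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	hs := health.NewServer()
	// Mark a specific service as SERVING; clients watching this name via
	// Watch are moved to READY by the client-side health checker.
	hs.SetServingStatus("my.service.Name", healthpb.HealthCheckResponse_SERVING)
	healthpb.RegisterHealthServer(s, hs)
	if err := s.Serve(lis); err != nil {
		panic(err)
	}
}
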
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 041520d..f0744f9 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -24,6 +24,7 @@
 package channelz
 
 import (
+	"fmt"
 	"sort"
 	"sync"
 	"sync/atomic"
@@ -95,9 +96,14 @@
 
 // NewChannelzStorage initializes channelz data storage and id generator.
 //
+// This function returns a cleanup function to wait for all channelz state to be reset by the
+// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
+// don't mess up each other, i.e. a lingering goroutine from a previous test doing entity removal
+// doesn't happen to remove an entity just registered by the new test, since the id space is the same.
+//
 // Note: This function is exported for testing purpose only. User should not call
 // it in most cases.
-func NewChannelzStorage() {
+func NewChannelzStorage() (cleanup func() error) {
 	db.set(&channelMap{
 		topLevelChannels: make(map[int64]struct{}),
 		channels:         make(map[int64]*channel),
@@ -107,6 +113,28 @@
 		subChannels:      make(map[int64]*subChannel),
 	})
 	idGen.reset()
+	return func() error {
+		var err error
+		cm := db.get()
+		if cm == nil {
+			return nil
+		}
+		for i := 0; i < 1000; i++ {
+			cm.mu.Lock()
+			if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
+				cm.mu.Unlock()
+				// all things stored in the channelz map have been cleared.
+				return nil
+			}
+			cm.mu.Unlock()
+			time.Sleep(10 * time.Millisecond)
+		}
+
+		cm.mu.Lock()
+		err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
+		cm.mu.Unlock()
+		return err
+	}
 }
 
 // GetTopChannels returns a slice of top channel's ChannelMetric, along with a
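
The cleanup function returned by NewChannelzStorage is a poll-until-empty loop with a fixed attempt budget (1000 tries at 10ms, hence the "after 10s" in the error). The same helper shape as a generic standalone sketch:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntil polls cond every interval until it returns true or the attempt
// budget is exhausted, mirroring the 1000 x 10ms loop in NewChannelzStorage.
func waitUntil(cond func() bool, interval time.Duration, attempts int) error {
	for i := 0; i < attempts; i++ {
		if cond() {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("condition not met before deadline")
}

func main() {
	deadline := time.Now().Add(25 * time.Millisecond)
	err := waitUntil(func() bool { return time.Now().After(deadline) },
		10*time.Millisecond, 1000)
	fmt.Println("cleanup:", err) // nil: the condition held within budget
}
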
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 11be7cd..3ee8740 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -25,40 +25,11 @@
 )
 
 const (
-	prefix              = "GRPC_GO_"
-	retryStr            = prefix + "RETRY"
-	requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
-)
-
-// RequireHandshakeSetting describes the settings for handshaking.
-type RequireHandshakeSetting int
-
-const (
-	// RequireHandshakeOn indicates to wait for handshake before considering a
-	// connection ready/successful.
-	RequireHandshakeOn RequireHandshakeSetting = iota
-	// RequireHandshakeOff indicates to not wait for handshake before
-	// considering a connection ready/successful.
-	RequireHandshakeOff
+	prefix   = "GRPC_GO_"
+	retryStr = prefix + "RETRY"
 )
 
 var (
 	// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
 	Retry = strings.EqualFold(os.Getenv(retryStr), "on")
-	// RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
-	// environment variable.
-	//
-	// Will be removed after the 1.18 release.
-	RequireHandshake = RequireHandshakeOn
 )
-
-func init() {
-	switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
-	case "on":
-		fallthrough
-	default:
-		RequireHandshake = RequireHandshakeOn
-	case "off":
-		RequireHandshake = RequireHandshakeOff
-	}
-}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c1d2c69..bc1f99a 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -23,6 +23,8 @@
 import (
 	"context"
 	"time"
+
+	"google.golang.org/grpc/connectivity"
 )
 
 var (
@@ -37,10 +39,25 @@
 	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
 	// default, but tests may wish to set it lower for convenience.
 	KeepaliveMinPingTime = 10 * time.Second
+	// ParseServiceConfig is a function to parse JSON service configs into
+	// opaque data structures.
+	ParseServiceConfig func(sc string) (interface{}, error)
+	// StatusRawProto is exported by status/status.go. This func returns a
+	// pointer to the wrapped Status proto for a given status.Status without a
+	// call to proto.Clone(). The returned Status proto should not be mutated by
+	// the caller.
+	StatusRawProto interface{} // func (*status.Status) *spb.Status
 )
 
 // HealthChecker defines the signature of the client-side LB channel health checking function.
-type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
 
 const (
 	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 204ba15..b8e0aa4 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -23,6 +23,7 @@
 	"fmt"
 	"runtime"
 	"sync"
+	"sync/atomic"
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
@@ -84,12 +85,24 @@
 // the control buffer of transport. They represent different aspects of
 // control tasks, e.g., flow control, settings, streaming resetting, etc.
 
+// maxQueuedTransportResponseFrames is the most queued "transport response"
+// frames we will buffer before preventing new reads from occurring on the
+// transport.  These are control frames sent in response to client requests,
+// such as RST_STREAM due to bad headers or settings acks.
+const maxQueuedTransportResponseFrames = 50
+
+type cbItem interface {
+	isTransportResponseFrame() bool
+}
+
 // registerStream is used to register an incoming stream with loopy writer.
 type registerStream struct {
 	streamID uint32
 	wq       *writeQuota
 }
 
+func (*registerStream) isTransportResponseFrame() bool { return false }
+
 // headerFrame is also used to register stream on the client-side.
 type headerFrame struct {
 	streamID   uint32
@@ -102,6 +115,10 @@
 	onOrphaned func(error)    // Valid on client-side
 }
 
+func (h *headerFrame) isTransportResponseFrame() bool {
+	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
+}
+
 type cleanupStream struct {
 	streamID uint32
 	rst      bool
@@ -109,6 +126,8 @@
 	onWrite  func()
 }
 
+func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+
 type dataFrame struct {
 	streamID  uint32
 	endStream bool
@@ -119,27 +138,41 @@
 	onEachWrite func()
 }
 
+func (*dataFrame) isTransportResponseFrame() bool { return false }
+
 type incomingWindowUpdate struct {
 	streamID  uint32
 	increment uint32
 }
 
+func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
+
 type outgoingWindowUpdate struct {
 	streamID  uint32
 	increment uint32
 }
 
+func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
+	return false // window updates are throttled by thresholds
+}
+
 type incomingSettings struct {
 	ss []http2.Setting
 }
 
+func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
+
 type outgoingSettings struct {
 	ss []http2.Setting
 }
 
+func (*outgoingSettings) isTransportResponseFrame() bool { return false }
+
 type incomingGoAway struct {
 }
 
+func (*incomingGoAway) isTransportResponseFrame() bool { return false }
+
 type goAway struct {
 	code      http2.ErrCode
 	debugData []byte
@@ -147,15 +180,21 @@
 	closeConn bool
 }
 
+func (*goAway) isTransportResponseFrame() bool { return false }
+
 type ping struct {
 	ack  bool
 	data [8]byte
 }
 
+func (*ping) isTransportResponseFrame() bool { return true }
+
 type outFlowControlSizeRequest struct {
 	resp chan uint32
 }
 
+func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
+
 type outStreamState int
 
 const (
@@ -238,6 +277,14 @@
 	consumerWaiting bool
 	list            *itemList
 	err             error
+
+	// transportResponseFrames counts the number of queued items that represent
+	// the response of an action initiated by the peer.  trfChan is created
+	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
+	// closed and nilled when transportResponseFrames drops below the
+	// threshold.  Both fields are protected by mu.
+	transportResponseFrames int
+	trfChan                 atomic.Value // *chan struct{}
 }
 
 func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -248,12 +295,24 @@
 	}
 }
 
-func (c *controlBuffer) put(it interface{}) error {
+// throttle blocks if there are too many incomingSettings/cleanupStreams in the
+// controlbuf.
+func (c *controlBuffer) throttle() {
+	ch, _ := c.trfChan.Load().(*chan struct{})
+	if ch != nil {
+		select {
+		case <-*ch:
+		case <-c.done:
+		}
+	}
+}
+
+func (c *controlBuffer) put(it cbItem) error {
 	_, err := c.executeAndPut(nil, it)
 	return err
 }
 
-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
+func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
 	var wakeUp bool
 	c.mu.Lock()
 	if c.err != nil {
@@ -271,6 +330,15 @@
 		c.consumerWaiting = false
 	}
 	c.list.enqueue(it)
+	if it.isTransportResponseFrame() {
+		c.transportResponseFrames++
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are adding the frame that puts us over the threshold; create
+			// a throttling channel.
+			ch := make(chan struct{})
+			c.trfChan.Store(&ch)
+		}
+	}
 	c.mu.Unlock()
 	if wakeUp {
 		select {
@@ -304,7 +372,17 @@
 			return nil, c.err
 		}
 		if !c.list.isEmpty() {
-			h := c.list.dequeue()
+			h := c.list.dequeue().(cbItem)
+			if h.isTransportResponseFrame() {
+				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+					// We are removing the frame that put us over the
+					// threshold; close and clear the throttling channel.
+					ch := c.trfChan.Load().(*chan struct{})
+					close(*ch)
+					c.trfChan.Store((*chan struct{})(nil))
+				}
+				c.transportResponseFrames--
+			}
 			c.mu.Unlock()
 			return h, nil
 		}
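
The throttling added to controlbuf stores a *chan struct{} in an atomic.Value so the reader can check it locklessly; the writer creates the channel when the threshold is reached and closes it when the count drops back below. A standalone sketch of that gate pattern:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// gate blocks Wait() callers while the count of queued "response frames"
// is at the threshold, mirroring controlBuffer.throttle.
type gate struct {
	mu    sync.Mutex
	n     int
	limit int
	ch    atomic.Value // *chan struct{}
}

func (g *gate) Wait() {
	if ch, _ := g.ch.Load().(*chan struct{}); ch != nil {
		<-*ch // blocks until the writer closes the channel
	}
}

func (g *gate) add() {
	g.mu.Lock()
	g.n++
	if g.n == g.limit {
		ch := make(chan struct{})
		g.ch.Store(&ch) // threshold reached: install the throttling channel
	}
	g.mu.Unlock()
}

func (g *gate) remove() {
	g.mu.Lock()
	if g.n == g.limit {
		ch := g.ch.Load().(*chan struct{})
		close(*ch) // dropping below threshold: release blocked readers
		g.ch.Store((*chan struct{})(nil))
	}
	g.n--
	g.mu.Unlock()
}

func main() {
	g := &gate{limit: 2}
	g.add()
	g.add() // threshold reached; Wait() now blocks
	done := make(chan struct{})
	go func() { g.Wait(); close(done) }()
	g.remove() // closes the gate channel
	<-done
	fmt.Println("reader resumed")
}
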
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index 5ea997a..f262edd 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -149,6 +149,7 @@
 		n = uint32(math.MaxInt32)
 	}
 	f.mu.Lock()
+	defer f.mu.Unlock()
 	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
 	// can send without a window update.
 	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
@@ -169,10 +170,8 @@
 			// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
 			f.delta = n
 		}
-		f.mu.Unlock()
 		return f.delta
 	}
-	f.mu.Unlock()
 	return 0
 }
 
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index f2de84d..78f9ddc 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,6 +24,7 @@
 package transport
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -347,7 +348,7 @@
 		ht.stats.HandleRPC(s.ctx, inHeader)
 	}
 	s.trReader = &transportReader{
-		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
+		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
 		windowHandler: func(int) {},
 	}
 
@@ -361,7 +362,7 @@
 		for buf := make([]byte, readSize); ; {
 			n, err := req.Body.Read(buf)
 			if n > 0 {
-				s.buf.put(recvMsg{data: buf[:n:n]})
+				s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
 				buf = buf[n:]
 			}
 			if err != nil {
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 9dee6db..41a79c5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -117,6 +117,8 @@
 
 	onGoAway func(GoAwayReason)
 	onClose  func()
+
+	bufferPool *bufferPool
 }
 
 func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@@ -249,6 +251,7 @@
 		onGoAway:              onGoAway,
 		onClose:               onClose,
 		keepaliveEnabled:      keepaliveEnabled,
+		bufferPool:            newBufferPool(),
 	}
 	t.controlBuf = newControlBuffer(t.ctxDone)
 	if opts.InitialWindowSize >= defaultWindowSize {
@@ -367,6 +370,7 @@
 			closeStream: func(err error) {
 				t.CloseStream(s, err)
 			},
+			freeBuffer: t.bufferPool.put,
 		},
 		windowHandler: func(n int) {
 			t.updateWindow(s, uint32(n))
@@ -437,6 +441,15 @@
 
 	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
 		var k string
+		for k, vv := range md {
+			// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
+			if isReservedHeader(k) {
+				continue
+			}
+			for _, v := range vv {
+				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+			}
+		}
 		for _, vv := range added {
 			for i, v := range vv {
 				if i%2 == 0 {
@@ -450,15 +463,6 @@
 				headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
 			}
 		}
-		for k, vv := range md {
-			// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
-			if isReservedHeader(k) {
-				continue
-			}
-			for _, v := range vv {
-				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
-			}
-		}
 	}
 	if md, ok := t.md.(*metadata.MD); ok {
 		for k, vv := range *md {
@@ -489,6 +493,9 @@
 }
 
 func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
+	if len(t.perRPCCreds) == 0 {
+		return nil, nil
+	}
 	authData := map[string]string{}
 	for _, c := range t.perRPCCreds {
 		data, err := c.GetRequestMetadata(ctx, audience)
@@ -509,7 +516,7 @@
 }
 
 func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
-	callAuthData := map[string]string{}
+	var callAuthData map[string]string
 	// Check if credentials.PerRPCCredentials were provided via call options.
 	// Note: if these credentials are provided both via dial options and call
 	// options, then both sets of credentials will be applied.
@@ -521,6 +528,7 @@
 		if err != nil {
 			return nil, status.Errorf(codes.Internal, "transport: %v", err)
 		}
+		callAuthData = make(map[string]string, len(data))
 		for k, v := range data {
 			// Capital header names are illegal in HTTP/2
 			k = strings.ToLower(k)
@@ -549,10 +557,9 @@
 		s.write(recvMsg{err: err})
 		close(s.done)
 		// If headerChan isn't closed, then close it.
-		if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
 			close(s.headerChan)
 		}
-
 	}
 	hdr := &headerFrame{
 		hf:        headerFields,
@@ -713,7 +720,7 @@
 		s.write(recvMsg{err: err})
 	}
 	// If headerChan isn't closed, then close it.
-	if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
 		s.noHeaders = true
 		close(s.headerChan)
 	}
@@ -765,6 +772,9 @@
 		t.mu.Unlock()
 		return nil
 	}
+	// Call t.onClose before setting the state to closing, so the client stops
+	// attempting to create new streams on this transport as soon as possible.
+	t.onClose()
 	t.state = closing
 	streams := t.activeStreams
 	t.activeStreams = nil
@@ -785,7 +795,6 @@
 		}
 		t.statsHandler.HandleConn(t.ctx, connEnd)
 	}
-	t.onClose()
 	return err
 }
 
@@ -794,21 +803,21 @@
 // stream is closed.  If there are no active streams, the transport is closed
 // immediately.  This does nothing if the transport is already draining or
 // closing.
-func (t *http2Client) GracefulClose() error {
+func (t *http2Client) GracefulClose() {
 	t.mu.Lock()
 	// Make sure we move to draining only from active.
 	if t.state == draining || t.state == closing {
 		t.mu.Unlock()
-		return nil
+		return
 	}
 	t.state = draining
 	active := len(t.activeStreams)
 	t.mu.Unlock()
 	if active == 0 {
-		return t.Close()
+		t.Close()
+		return
 	}
 	t.controlBuf.put(&incomingGoAway{})
-	return nil
 }
 
 // Write formats the data into HTTP2 data frame(s) and sends it out. The caller
@@ -946,9 +955,10 @@
 		// guarantee f.Data() is consumed before the arrival of next frame.
 		// Can this copy be eliminated?
 		if len(f.Data()) > 0 {
-			data := make([]byte, len(f.Data()))
-			copy(data, f.Data())
-			s.write(recvMsg{data: data})
+			buffer := t.bufferPool.get()
+			buffer.Reset()
+			buffer.Write(f.Data())
+			s.write(recvMsg{buffer: buffer})
 		}
 	}
 	// The server has closed the stream without sending trailers.  Record that
@@ -973,9 +983,9 @@
 		statusCode = codes.Unknown
 	}
 	if statusCode == codes.Canceled {
-		// Our deadline was already exceeded, and that was likely the cause of
-		// this cancelation.  Alter the status code accordingly.
-		if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
+		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
+			// Our deadline was already exceeded, and that was likely the cause
+			// of this cancelation.  Alter the status code accordingly.
 			statusCode = codes.DeadlineExceeded
 		}
 	}
@@ -1080,11 +1090,12 @@
 	default:
 		t.setGoAwayReason(f)
 		close(t.goAway)
-		t.state = draining
 		t.controlBuf.put(&incomingGoAway{})
-
-		// This has to be a new goroutine because we're still using the current goroutine to read in the transport.
+		// Notify the clientconn about the GOAWAY before we set the state to
+		// draining, to allow the client to stop attempting to create streams
+		// before disallowing new streams on this connection.
 		t.onGoAway(t.goAwayReason)
+		t.state = draining
 	}
 	// All streams with IDs greater than the GoAwayId
 	// and smaller than the previous GoAway ID should be killed.
@@ -1142,26 +1153,24 @@
 	}
 	endStream := frame.StreamEnded()
 	atomic.StoreUint32(&s.bytesReceived, 1)
-	initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0
+	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
 
 	if !initialHeader && !endStream {
-		// As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear
-		// at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set.
+		// As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, a second HEADERS frame must have the EOS bit set.
 		st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
 		t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
 		return
 	}
 
 	state := &decodeState{}
-	// Initialize isGRPC value to be !initialHeader, since if a gRPC ResponseHeader has been received
-	// which indicates peer speaking gRPC, we are in gRPC mode.
+	// Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
 	state.data.isGRPC = !initialHeader
 	if err := state.decodeHeader(frame); err != nil {
 		t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
 		return
 	}
 
-	var isHeader bool
+	isHeader := false
 	defer func() {
 		if t.statsHandler != nil {
 			if isHeader {
@@ -1180,10 +1189,10 @@
 		}
 	}()
 
-	// If headers haven't been received yet.
-	if initialHeader {
+	// If headerChan hasn't been closed yet
+	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
 		if !endStream {
-			// Headers frame is ResponseHeader.
+			// HEADERS frame block carries a Response-Headers.
 			isHeader = true
 			// These values can be set without any synchronization because
 			// stream goroutine will read it only after seeing a closed
@@ -1192,14 +1201,17 @@
 			if len(state.data.mdata) > 0 {
 				s.header = state.data.mdata
 			}
-			close(s.headerChan)
-			return
+		} else {
+			// HEADERS frame block carries a Trailers-Only.
+			s.noHeaders = true
 		}
-		// Headers frame is Trailers-only.
-		s.noHeaders = true
 		close(s.headerChan)
 	}
 
+	if !endStream {
+		return
+	}
+
 	// if client received END_STREAM from server while stream was still active, send RST_STREAM
 	rst := s.getState() == streamActive
 	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
@@ -1233,6 +1245,7 @@
 
 	// loop to keep reading incoming messages on this transport.
 	for {
+		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
 		if t.keepaliveEnabled {
 			atomic.CompareAndSwapUint32(&t.activity, 0, 1)
@@ -1320,6 +1333,7 @@
 					timer.Reset(t.kp.Time)
 					continue
 				}
+				infof("transport: closing client transport due to idleness.")
 				t.Close()
 				return
 			case <-t.ctx.Done():
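
Several hunks above replace SwapUint32 on the old headerDone flag with CompareAndSwapUint32 on headerChanClosed, so only one goroutine ever closes headerChan even when the write path and the frame reader race. A minimal sketch of the close-exactly-once idiom:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var closed uint32
	headerChan := make(chan struct{})

	closeOnce := func() {
		// Only the goroutine that flips 0 -> 1 closes the channel;
		// everyone else sees the CAS fail and does nothing.
		if atomic.CompareAndSwapUint32(&closed, 0, 1) {
			close(headerChan)
		}
	}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); closeOnce() }()
	}
	wg.Wait()
	<-headerChan // already closed; does not block
	fmt.Println("headerChan closed exactly once")
}
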
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 435092e..83439b5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -35,9 +35,11 @@
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
 
+	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/keepalive"
@@ -55,6 +57,9 @@
 	// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
 	// than the limit set by peer.
 	ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
+	// statusRawProto is a function to get to the raw status proto wrapped in a
+	// status.Status without a proto.Clone().
+	statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
 )
 
 // http2Server implements the ServerTransport interface with HTTP2.
@@ -119,6 +124,7 @@
 	// Fields below are for channelz metric collection.
 	channelzID int64 // channelz unique identification number
 	czData     *channelzData
+	bufferPool *bufferPool
 }
 
 // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@@ -220,6 +226,7 @@
 		kep:               kep,
 		initialWindowSize: iwz,
 		czData:            new(channelzData),
+		bufferPool:        newBufferPool(),
 	}
 	t.controlBuf = newControlBuffer(t.ctxDone)
 	if dynamicWindow {
@@ -405,9 +412,10 @@
 	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
 	s.trReader = &transportReader{
 		reader: &recvBufferReader{
-			ctx:     s.ctx,
-			ctxDone: s.ctxDone,
-			recv:    s.buf,
+			ctx:        s.ctx,
+			ctxDone:    s.ctxDone,
+			recv:       s.buf,
+			freeBuffer: t.bufferPool.put,
 		},
 		windowHandler: func(n int) {
 			t.updateWindow(s, uint32(n))
@@ -428,6 +436,7 @@
 func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
 	defer close(t.readerDone)
 	for {
+		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
 		atomic.StoreUint32(&t.activity, 1)
 		if err != nil {
@@ -591,9 +600,10 @@
 		// guarantee f.Data() is consumed before the arrival of next frame.
 		// Can this copy be eliminated?
 		if len(f.Data()) > 0 {
-			data := make([]byte, len(f.Data()))
-			copy(data, f.Data())
-			s.write(recvMsg{data: data})
+			buffer := t.bufferPool.get()
+			buffer.Reset()
+			buffer.Write(f.Data())
+			s.write(recvMsg{buffer: buffer})
 		}
 	}
 	if f.Header().Flags.Has(http2.FlagDataEndStream) {
@@ -757,6 +767,10 @@
 	return nil
 }
 
+func (t *http2Server) setResetPingStrikes() {
+	atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
 func (t *http2Server) writeHeaderLocked(s *Stream) error {
 	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
 	// first and create a slice of that exact size.
@@ -771,9 +785,7 @@
 		streamID:  s.id,
 		hf:        headerFields,
 		endStream: false,
-		onWrite: func() {
-			atomic.StoreUint32(&t.resetPingStrikes, 1)
-		},
+		onWrite:   t.setResetPingStrikes,
 	})
 	if !success {
 		if err != nil {
@@ -817,7 +829,7 @@
 	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
 
-	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+	if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
 		stBytes, err := proto.Marshal(p)
 		if err != nil {
 			// TODO: return error instead, when callers are able to handle it.
@@ -833,9 +845,7 @@
 		streamID:  s.id,
 		hf:        headerFields,
 		endStream: true,
-		onWrite: func() {
-			atomic.StoreUint32(&t.resetPingStrikes, 1)
-		},
+		onWrite:   t.setResetPingStrikes,
 	}
 	s.hdrMu.Unlock()
 	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
@@ -887,12 +897,10 @@
 	hdr = append(hdr, data[:emptyLen]...)
 	data = data[emptyLen:]
 	df := &dataFrame{
-		streamID: s.id,
-		h:        hdr,
-		d:        data,
-		onEachWrite: func() {
-			atomic.StoreUint32(&t.resetPingStrikes, 1)
-		},
+		streamID:    s.id,
+		h:           hdr,
+		d:           data,
+		onEachWrite: t.setResetPingStrikes,
 	}
 	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
 		select {
@@ -958,6 +966,7 @@
 			select {
 			case <-maxAge.C:
 				// Close the connection after grace period.
+				infof("transport: closing server transport due to maximum connection age.")
 				t.Close()
 				// Resetting the timer so that the clean-up doesn't deadlock.
 				maxAge.Reset(infinity)
@@ -971,6 +980,7 @@
 				continue
 			}
 			if pingSent {
+				infof("transport: closing server transport due to idleness.")
 				t.Close()
 				// Resetting the timer so that the clean-up doesn't deadlock.
 				keepalive.Reset(infinity)
@@ -1019,13 +1029,7 @@
 }
 
 // deleteStream deletes the stream s from transport's active streams.
-func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) {
-	oldState = s.swapState(streamDone)
-	if oldState == streamDone {
-		// If the stream was already done, return.
-		return oldState
-	}
-
+func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
 	// In case stream sending and receiving are invoked in separate
 	// goroutines (e.g., bi-directional streaming), cancel needs to be
 	// called to interrupt the potential blocking on other goroutines.
@@ -1047,15 +1051,13 @@
 			atomic.AddInt64(&t.czData.streamsFailed, 1)
 		}
 	}
-
-	return oldState
 }
 
 // finishStream closes the stream and puts the trailing headerFrame into controlbuf.
 func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
-	oldState := t.deleteStream(s, eosReceived)
-	// If the stream is already closed, then don't put trailing header to controlbuf.
+	oldState := s.swapState(streamDone)
 	if oldState == streamDone {
+		// If the stream was already done, return.
 		return
 	}
 
@@ -1063,14 +1065,18 @@
 		streamID: s.id,
 		rst:      rst,
 		rstCode:  rstCode,
-		onWrite:  func() {},
+		onWrite: func() {
+			t.deleteStream(s, eosReceived)
+		},
 	}
 	t.controlBuf.put(hdr)
 }
 
 // closeStream clears the footprint of a stream when the stream is not needed any more.
 func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+	s.swapState(streamDone)
 	t.deleteStream(s, eosReceived)
+
 	t.controlBuf.put(&cleanupStream{
 		streamID: s.id,
 		rst:      rst,
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 7f82cbb..1c1d106 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,6 +22,7 @@
 package transport
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -39,10 +40,32 @@
 	"google.golang.org/grpc/tap"
 )
 
+type bufferPool struct {
+	pool sync.Pool
+}
+
+func newBufferPool() *bufferPool {
+	return &bufferPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(bytes.Buffer)
+			},
+		},
+	}
+}
+
+func (p *bufferPool) get() *bytes.Buffer {
+	return p.pool.Get().(*bytes.Buffer)
+}
+
+func (p *bufferPool) put(b *bytes.Buffer) {
+	p.pool.Put(b)
+}
+
 // recvMsg represents the received msg from the transport. All transport
 // protocol specific info has been removed.
 type recvMsg struct {
-	data []byte
+	buffer *bytes.Buffer
 	// nil: received some data
 	// io.EOF: stream is completed. data is nil.
 	// other non-nil error: transport failure. data is nil.
@@ -117,8 +140,9 @@
 	ctx         context.Context
 	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
 	recv        *recvBuffer
-	last        []byte // Stores the remaining data in the previous calls.
+	last        *bytes.Buffer // Stores the remaining data in the previous calls.
 	err         error
+	freeBuffer  func(*bytes.Buffer)
 }
 
 // Read reads the next len(p) bytes from last. If last is drained, it tries to
@@ -128,10 +152,13 @@
 	if r.err != nil {
 		return 0, r.err
 	}
-	if r.last != nil && len(r.last) > 0 {
+	if r.last != nil {
 		// Read remaining data left in last call.
-		copied := copy(p, r.last)
-		r.last = r.last[copied:]
+		copied, _ := r.last.Read(p)
+		if r.last.Len() == 0 {
+			r.freeBuffer(r.last)
+			r.last = nil
+		}
 		return copied, nil
 	}
 	if r.closeStream != nil {
@@ -157,6 +184,19 @@
 	// r.readAdditional acts on that message and returns the necessary error.
 	select {
 	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was: stream.Recv() may return a ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when the trailer was received. This closeStream
+		// call will mark the stream as done, thus fixing the race.
+		//
+		// TODO: delaying the ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return the ctx error
+		// faster.
 		r.closeStream(ContextErr(r.ctx.Err()))
 		m := <-r.recv.get()
 		return r.readAdditional(m, p)
@@ -170,8 +210,13 @@
 	if m.err != nil {
 		return 0, m.err
 	}
-	copied := copy(p, m.data)
-	r.last = m.data[copied:]
+	copied, _ := m.buffer.Read(p)
+	if m.buffer.Len() == 0 {
+		r.freeBuffer(m.buffer)
+		r.last = nil
+	} else {
+		r.last = m.buffer
+	}
 	return copied, nil
 }
 
@@ -204,8 +249,8 @@
 	// is used to adjust flow control, if needed.
 	requestRead func(int)
 
-	headerChan chan struct{} // closed to indicate the end of header metadata.
-	headerDone uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
 
 	// hdrMu protects header and trailer metadata on the server-side.
 	hdrMu sync.Mutex
@@ -266,6 +311,14 @@
 	}
 	select {
 	case <-s.ctx.Done():
+		// We prefer success over failure when reading messages because we delay
+		// the context error in stream.Read(). To keep behavior consistent, we also
+		// prefer success here.
+		select {
+		case <-s.headerChan:
+			return nil
+		default:
+		}
 		return ContextErr(s.ctx.Err())
 	case <-s.headerChan:
 		return nil
@@ -578,9 +631,12 @@
 	// is called only once.
 	Close() error
 
-	// GracefulClose starts to tear down the transport. It stops accepting
-	// new RPCs and wait the completion of the pending RPCs.
-	GracefulClose() error
+	// GracefulClose starts to tear down the transport: the transport will stop
+// accepting new RPCs and NewStream will return an error. Once all streams are
+	// finished, the transport will close.
+	//
+	// It does not block.
+	GracefulClose()
 
 	// Write sends the data for the given stream. A nil stream indicates
 	// the write is to be performed on the transport as a whole.
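
The new bufferPool above is a thin wrapper over sync.Pool, and the reader code keeps to a strict discipline: a buffer is recycled via freeBuffer only once it is fully drained. A small round-trip sketch under the same names (illustrative only, not the transport's actual call path; note that a buffer fetched from a pool may hold stale contents and should be Reset before use):

    package main

    import (
        "bytes"
        "fmt"
        "sync"
    )

    type bufferPool struct{ pool sync.Pool }

    func newBufferPool() *bufferPool {
        return &bufferPool{pool: sync.Pool{
            New: func() interface{} { return new(bytes.Buffer) },
        }}
    }

    func (p *bufferPool) get() *bytes.Buffer  { return p.pool.Get().(*bytes.Buffer) }
    func (p *bufferPool) put(b *bytes.Buffer) { p.pool.Put(b) }

    func main() {
        pool := newBufferPool()

        b := pool.get()
        b.Reset() // a recycled buffer may still hold old contents
        b.WriteString("payload")

        dst := make([]byte, 16)
        n, _ := b.Read(dst)
        fmt.Printf("read %d bytes: %q, %d left\n", n, dst[:n], b.Len())

        if b.Len() == 0 { // recycle only once fully drained
            pool.put(b)
        }
    }
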
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
index c99fdbe..f4c1c8b 100644
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ b/vendor/google.golang.org/grpc/naming/naming.go
@@ -17,9 +17,8 @@
  */
 
 // Package naming defines the naming API and related data structures for gRPC.
-// The interface is EXPERIMENTAL and may be subject to change.
 //
-// Deprecated: please use package resolver.
+// This package is deprecated: please use package resolver instead.
 package naming
 
 // Operation defines the corresponding operations for a name resolution change.
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index f962549..45baa2a 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -120,6 +120,14 @@
 			bp.mu.Unlock()
 			select {
 			case <-ctx.Done():
+				if connectionErr := bp.connectionError(); connectionErr != nil {
+					switch ctx.Err() {
+					case context.DeadlineExceeded:
+						return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
+					case context.Canceled:
+						return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
+					}
+				}
 				return nil, nil, ctx.Err()
 			case <-ch:
 			}
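
The picker change above decorates a bare context error with the most recent connection error, turning an opaque DeadlineExceeded into something actionable. A hedged sketch of the same mapping as a standalone helper (pickErr and connectionErr are illustrative names, not the wrapper's actual API):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // pickErr maps an expired context to a gRPC status that carries the
    // last connection error, instead of a bare context error.
    func pickErr(ctx context.Context, connectionErr error) error {
        if connectionErr != nil {
            switch ctx.Err() {
            case context.DeadlineExceeded:
                return status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
            case context.Canceled:
                return status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
            }
        }
        return ctx.Err()
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
        defer cancel()
        <-ctx.Done()

        err := pickErr(ctx, errors.New("connection refused"))
        s, _ := status.FromError(err)
        fmt.Println(s.Code(), s.Message())
        // DeadlineExceeded latest connection error: connection refused
    }
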
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index d1e38aa..ed05b02 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -51,14 +51,18 @@
 
 func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
 	if err != nil {
-		grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
+		if grpclog.V(2) {
+			grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
+		}
 		return
 	}
 	if b.sc == nil {
 		b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
 		if err != nil {
 			//TODO(yuxuanli): why not change the cc state to Idle?
-			grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+			if grpclog.V(2) {
+				grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+			}
 			return
 		}
 		b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
@@ -70,9 +74,13 @@
 }
 
 func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+	if grpclog.V(2) {
+		grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+	}
 	if b.sc != sc {
-		grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+		if grpclog.V(2) {
+			grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+		}
 		return
 	}
 	if s == connectivity.Shutdown {
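
The pickfirst changes gate these log lines behind grpclog.V(2), so per-state-change chatter only appears at higher verbosity. With the default logger, verbosity is driven by the GRPC_GO_LOG_VERBOSITY_LEVEL environment variable; a minimal sketch:

    package main

    import "google.golang.org/grpc/grpclog"

    func main() {
        // grpclog.V(l) reports whether verbosity level l is enabled; with
        // the default logger set GRPC_GO_LOG_VERBOSITY_LEVEL=2 to see this.
        if grpclog.V(2) {
            grpclog.Infof("noisy balancer diagnostics go here")
        }
    }
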
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
new file mode 100644
index 0000000..76acbbc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// PreparedMsg is responsible for creating a marshalled and compressed object.
+//
+// This API is EXPERIMENTAL.
+type PreparedMsg struct {
+	// Fields used to prepare the msg before it is sent.
+	encodedData []byte
+	hdr         []byte
+	payload     []byte
+}
+
+// Encode marshals and compresses the message using the codec and compressor for the stream.
+func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
+	ctx := s.Context()
+	rpcInfo, ok := rpcInfoFromContext(ctx)
+	if !ok {
+		return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
+	}
+
+	// check if the context has the relevant information to prepareMsg
+	if rpcInfo.preloaderInfo == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
+	}
+	if rpcInfo.preloaderInfo.codec == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
+	}
+
+	// prepare the msg
+	data, err := encode(rpcInfo.preloaderInfo.codec, msg)
+	if err != nil {
+		return err
+	}
+	p.encodedData = data
+	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+	if err != nil {
+		return err
+	}
+	p.hdr, p.payload = msgHeader(data, compData)
+	return nil
+}
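
PreparedMsg lets a caller marshal and compress a message once, ahead of time, and have later send paths reuse the bytes; the prepareMsg helper added to stream.go further down dispatches on the *PreparedMsg type to skip re-encoding. A self-contained sketch of that dispatch pattern, using encoding/json as a stand-in codec (all names here are illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // preparedMsg mimics PreparedMsg: the encoded form is computed once, up
    // front, and kept for reuse by later sends.
    type preparedMsg struct{ encodedData []byte }

    func (p *preparedMsg) encode(msg interface{}) error {
        data, err := json.Marshal(msg) // stand-in for the stream's codec
        if err != nil {
            return err
        }
        p.encodedData = data
        return nil
    }

    // send mirrors the prepareMsg fast path: a *preparedMsg skips
    // marshalling entirely; anything else is encoded on the spot.
    func send(msg interface{}) ([]byte, error) {
        if pm, ok := msg.(*preparedMsg); ok {
            return pm.encodedData, nil
        }
        return json.Marshal(msg)
    }

    func main() {
        pm := &preparedMsg{}
        if err := pm.encode(map[string]string{"k": "v"}); err != nil {
            panic(err)
        }
        for i := 0; i < 3; i++ {
            payload, _ := send(pm) // bytes reused, not re-encoded
            fmt.Println(string(payload))
        }
    }
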
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
index 5835599..297492e 100644
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -66,6 +66,9 @@
 
 var (
 	defaultResolver netResolver = net.DefaultResolver
+	// To prevent excessive re-resolution, we enforce a rate limit on DNS
+	// resolution requests.
+	minDNSResRate = 30 * time.Second
 )
 
 var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
@@ -241,7 +244,13 @@
 			return
 		case <-d.t.C:
 		case <-d.rn:
+			if !d.t.Stop() {
+				// Before resetting a timer, it should be stopped to prevent racing with
+				// reads on its channel.
+				<-d.t.C
+			}
 		}
+
 		result, sc := d.lookup()
 		// Next lookup should happen within an interval defined by d.freq. It may be
 		// more often due to exponential retry on empty address list.
@@ -254,6 +263,16 @@
 		}
 		d.cc.NewServiceConfig(sc)
 		d.cc.NewAddress(result)
+
+		// Sleep to prevent excessive re-resolutions. Incoming resolution requests
+		// will be queued in d.rn.
+		t := time.NewTimer(minDNSResRate)
+		select {
+		case <-t.C:
+		case <-d.ctx.Done():
+			t.Stop()
+			return
+		}
 	}
 }
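
The resolver loop above uses the standard timer hygiene idiom: a fired-but-unread timer must be drained before Reset, or the stale tick could be received later. A minimal sketch of the idiom (safe only while a single goroutine reads the timer's channel):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.NewTimer(time.Hour)

        // Stop reports false if the timer already fired; in that case the
        // pending value must be drained from t.C before Reset, or a later
        // receive would see the stale tick.
        if !t.Stop() {
            <-t.C
        }
        t.Reset(10 * time.Millisecond)

        <-t.C
        fmt.Println("timer fired after a clean reset")
    }
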
 
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 52ec603..e83da34 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -20,6 +20,10 @@
 // All APIs in this package are experimental.
 package resolver
 
+import (
+	"google.golang.org/grpc/serviceconfig"
+)
+
 var (
 	// m is a map from scheme to resolver builder.
 	m = make(map[string]Builder)
@@ -100,11 +104,12 @@
 
 // State contains the current Resolver state relevant to the ClientConn.
 type State struct {
-	Addresses     []Address // Resolved addresses for the target
-	ServiceConfig string    // JSON representation of the service config
+	Addresses []Address // Resolved addresses for the target
+	// ServiceConfig is the parsed service config; obtained from
+	// serviceconfig.Parse.
+	ServiceConfig serviceconfig.Config
 
 	// TODO: add Err error
-	// TODO: add ParsedServiceConfig interface{}
 }
 
 // ClientConn contains the callbacks for resolver to notify any updates
@@ -132,6 +137,21 @@
 
 // Target represents a target for gRPC, as specified in:
 // https://github.com/grpc/grpc/blob/master/doc/naming.md.
+// It is parsed from the target string that gets passed into Dial or DialContext by the user. gRPC
+// then passes it to the resolver and the balancer.
+//
+// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
+// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
+// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
+//
+// If the target does not contain a scheme, we will apply the default scheme, and set the Endpoint
+// to be the full target string. e.g. "foo.bar" will be parsed into
+// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
+//
+// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
+// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
+// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
+// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
 type Target struct {
 	Scheme    string
 	Authority string
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index e9cef3a..6934905 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -138,19 +138,22 @@
 		return
 	}
 	grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
-	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc})
+	c, err := parseServiceConfig(sc)
+	if err != nil {
+		return
 	}
-	ccr.curState.ServiceConfig = sc
+	if channelz.IsOn() {
+		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
+	}
+	ccr.curState.ServiceConfig = c
 	ccr.cc.updateResolverState(ccr.curState)
 }
 
 func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
-	if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) {
-		return
-	}
 	var updates []string
-	if s.ServiceConfig != ccr.curState.ServiceConfig {
+	oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
+	newSC, newOK := s.ServiceConfig.(*ServiceConfig)
+	if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
 		updates = append(updates, "service config updated")
 	}
 	if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 2a59562..088c3f1 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -694,14 +694,34 @@
 	return nil
 }
 
+// rpcInfo contains information about the RPC.
 type rpcInfo struct {
-	failfast bool
+	failfast      bool
+	preloaderInfo *compressorInfo
+}
+
+// compressorInfo stores the codec and the compressors for the preloader.
+// If a stream s has a context s.Context() that stores an rpcInfo with
+// non-nil pointers to the codec and compressors, then PreparedMsg can be
+// used for asynchronous message preparation, and the marshalled bytes can
+// be reused.
+type compressorInfo struct {
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
 }
 
 type rpcInfoContextKey struct{}
 
-func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
-	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
+func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
+	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
+		failfast: failfast,
+		preloaderInfo: &compressorInfo{
+			codec: codec,
+			cp:    cp,
+			comp:  comp,
+		},
+	})
 }
 
 func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
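
newContextWithRPCInfo follows the usual context-value pattern: an unexported struct type as the key guarantees no other package can collide with, or read, the entry. A stripped-down sketch of the same pattern (pared back to the failfast field only):

    package main

    import (
        "context"
        "fmt"
    )

    type rpcInfo struct{ failfast bool }

    // rpcInfoContextKey is unexported, so only this package can store or
    // retrieve the value under it.
    type rpcInfoContextKey struct{}

    func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
        return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
    }

    func rpcInfoFromContext(ctx context.Context) (*rpcInfo, bool) {
        s, ok := ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
        return s, ok
    }

    func main() {
        ctx := newContextWithRPCInfo(context.Background(), true)
        if info, ok := rpcInfoFromContext(ctx); ok {
            fmt.Println("failfast:", info.failfast)
        }
    }
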
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 8115828..f064b73 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -42,6 +42,7 @@
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
@@ -56,6 +57,8 @@
 	defaultServerMaxSendMessageSize    = math.MaxInt32
 )
 
+var statusOK = status.New(codes.OK, "")
+
 type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
 
 // MethodDesc represents an RPC service's method specification.
@@ -86,21 +89,19 @@
 
 // Server is a gRPC server to serve RPC requests.
 type Server struct {
-	opts options
+	opts serverOptions
 
 	mu     sync.Mutex // guards following
 	lis    map[net.Listener]bool
-	conns  map[io.Closer]bool
+	conns  map[transport.ServerTransport]bool
 	serve  bool
 	drain  bool
 	cv     *sync.Cond          // signaled when connections close for GracefulStop
 	m      map[string]*service // service name -> service info
 	events trace.EventLog
 
-	quit               chan struct{}
-	done               chan struct{}
-	quitOnce           sync.Once
-	doneOnce           sync.Once
+	quit               *grpcsync.Event
+	done               *grpcsync.Event
 	channelzRemoveOnce sync.Once
 	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
 
@@ -108,7 +109,7 @@
 	czData     *channelzData
 }
 
-type options struct {
+type serverOptions struct {
 	creds                 credentials.TransportCredentials
 	codec                 baseCodec
 	cp                    Compressor
@@ -131,7 +132,7 @@
 	maxHeaderListSize     *uint32
 }
 
-var defaultServerOptions = options{
+var defaultServerOptions = serverOptions{
 	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
 	maxSendMessageSize:    defaultServerMaxSendMessageSize,
 	connectionTimeout:     120 * time.Second,
@@ -140,7 +141,33 @@
 }
 
 // A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
-type ServerOption func(*options)
+type ServerOption interface {
+	apply(*serverOptions)
+}
+
+// EmptyServerOption does not alter the server configuration. It can be embedded
+// in another structure to build custom server options.
+//
+// This API is EXPERIMENTAL.
+type EmptyServerOption struct{}
+
+func (EmptyServerOption) apply(*serverOptions) {}
+
+// funcServerOption wraps a function that modifies serverOptions into an
+// implementation of the ServerOption interface.
+type funcServerOption struct {
+	f func(*serverOptions)
+}
+
+func (fdo *funcServerOption) apply(do *serverOptions) {
+	fdo.f(do)
+}
+
+func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
+	return &funcServerOption{
+		f: f,
+	}
+}
 
 // WriteBufferSize determines how much data can be batched before doing a write on the wire.
 // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
@@ -148,9 +175,9 @@
 // Zero will disable the write buffer such that each write will be on underlying connection.
 // Note: A Send call may not directly translate to a write.
 func WriteBufferSize(s int) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.writeBufferSize = s
-	}
+	})
 }
 
 // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
@@ -159,25 +186,25 @@
 // Zero will disable read buffer for a connection so data framer can access the underlying
 // conn directly.
 func ReadBufferSize(s int) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.readBufferSize = s
-	}
+	})
 }
 
 // InitialWindowSize returns a ServerOption that sets window size for stream.
 // The lower bound for window size is 64K and any value smaller than that will be ignored.
 func InitialWindowSize(s int32) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.initialWindowSize = s
-	}
+	})
 }
 
 // InitialConnWindowSize returns a ServerOption that sets window size for a connection.
 // The lower bound for window size is 64K and any value smaller than that will be ignored.
 func InitialConnWindowSize(s int32) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.initialConnWindowSize = s
-	}
+	})
 }
 
 // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
@@ -187,25 +214,25 @@
 		kp.Time = time.Second
 	}
 
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.keepaliveParams = kp
-	}
+	})
 }
 
 // KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
 func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.keepalivePolicy = kep
-	}
+	})
 }
 
 // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
 //
 // This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
 func CustomCodec(codec Codec) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.codec = codec
-	}
+	})
 }
 
 // RPCCompressor returns a ServerOption that sets a compressor for outbound
@@ -216,9 +243,9 @@
 //
 // Deprecated: use encoding.RegisterCompressor instead.
 func RPCCompressor(cp Compressor) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.cp = cp
-	}
+	})
 }
 
 // RPCDecompressor returns a ServerOption that sets a decompressor for inbound
@@ -227,9 +254,9 @@
 //
 // Deprecated: use encoding.RegisterCompressor instead.
 func RPCDecompressor(dc Decompressor) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.dc = dc
-	}
+	})
 }
 
 // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
@@ -243,73 +270,73 @@
 // MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
 // If this is not set, gRPC uses the default 4MB.
 func MaxRecvMsgSize(m int) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.maxReceiveMessageSize = m
-	}
+	})
 }
 
 // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
 // If this is not set, gRPC uses the default `math.MaxInt32`.
 func MaxSendMsgSize(m int) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.maxSendMessageSize = m
-	}
+	})
 }
 
 // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
 // of concurrent streams to each ServerTransport.
 func MaxConcurrentStreams(n uint32) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.maxConcurrentStreams = n
-	}
+	})
 }
 
 // Creds returns a ServerOption that sets credentials for server connections.
 func Creds(c credentials.TransportCredentials) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.creds = c
-	}
+	})
 }
 
 // UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
 // server. Only one unary interceptor can be installed. The construction of multiple
 // interceptors (e.g., chaining) can be implemented at the caller.
 func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		if o.unaryInt != nil {
 			panic("The unary server interceptor was already set and may not be reset.")
 		}
 		o.unaryInt = i
-	}
+	})
 }
 
 // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
 // server. Only one stream interceptor can be installed.
 func StreamInterceptor(i StreamServerInterceptor) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		if o.streamInt != nil {
 			panic("The stream server interceptor was already set and may not be reset.")
 		}
 		o.streamInt = i
-	}
+	})
 }
 
 // InTapHandle returns a ServerOption that sets the tap handle for all the server
 // transport to be created. Only one can be installed.
 func InTapHandle(h tap.ServerInHandle) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		if o.inTapHandle != nil {
 			panic("The tap handle was already set and may not be reset.")
 		}
 		o.inTapHandle = h
-	}
+	})
 }
 
 // StatsHandler returns a ServerOption that sets the stats handler for the server.
 func StatsHandler(h stats.Handler) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.statsHandler = h
-	}
+	})
 }
 
 // UnknownServiceHandler returns a ServerOption that allows for adding a custom
@@ -319,7 +346,7 @@
 // The handling function has full access to the Context of the request and the
 // stream, and the invocation bypasses interceptors.
 func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.unknownStreamDesc = &StreamDesc{
 			StreamName: "unknown_service_handler",
 			Handler:    streamHandler,
@@ -327,7 +354,7 @@
 			ClientStreams: true,
 			ServerStreams: true,
 		}
-	}
+	})
 }
 
 // ConnectionTimeout returns a ServerOption that sets the timeout for
@@ -337,17 +364,17 @@
 //
 // This API is EXPERIMENTAL.
 func ConnectionTimeout(d time.Duration) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.connectionTimeout = d
-	}
+	})
 }
 
 // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
 // of header list that the server is prepared to accept.
 func MaxHeaderListSize(s uint32) ServerOption {
-	return func(o *options) {
+	return newFuncServerOption(func(o *serverOptions) {
 		o.maxHeaderListSize = &s
-	}
+	})
 }
 
 // NewServer creates a gRPC server which has no service registered and has not
@@ -355,15 +382,15 @@
 func NewServer(opt ...ServerOption) *Server {
 	opts := defaultServerOptions
 	for _, o := range opt {
-		o(&opts)
+		o.apply(&opts)
 	}
 	s := &Server{
 		lis:    make(map[net.Listener]bool),
 		opts:   opts,
-		conns:  make(map[io.Closer]bool),
+		conns:  make(map[transport.ServerTransport]bool),
 		m:      make(map[string]*service),
-		quit:   make(chan struct{}),
-		done:   make(chan struct{}),
+		quit:   grpcsync.NewEvent(),
+		done:   grpcsync.NewEvent(),
 		czData: new(channelzData),
 	}
 	s.cv = sync.NewCond(&s.mu)
@@ -530,11 +557,9 @@
 	s.serveWG.Add(1)
 	defer func() {
 		s.serveWG.Done()
-		select {
-		// Stop or GracefulStop called; block until done and return nil.
-		case <-s.quit:
-			<-s.done
-		default:
+		if s.quit.HasFired() {
+			// Stop or GracefulStop called; block until done and return nil.
+			<-s.done.Done()
 		}
 	}()
 
@@ -577,7 +602,7 @@
 				timer := time.NewTimer(tempDelay)
 				select {
 				case <-timer.C:
-				case <-s.quit:
+				case <-s.quit.Done():
 					timer.Stop()
 					return nil
 				}
@@ -587,10 +612,8 @@
 			s.printf("done serving; Accept = %v", err)
 			s.mu.Unlock()
 
-			select {
-			case <-s.quit:
+			if s.quit.HasFired() {
 				return nil
-			default:
 			}
 			return err
 		}
@@ -611,6 +634,10 @@
 // handleRawConn forks a goroutine to handle a just-accepted connection that
 // has not had any I/O performed on it yet.
 func (s *Server) handleRawConn(rawConn net.Conn) {
+	if s.quit.HasFired() {
+		rawConn.Close()
+		return
+	}
 	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
 	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
 	if err != nil {
@@ -627,14 +654,6 @@
 		return
 	}
 
-	s.mu.Lock()
-	if s.conns == nil {
-		s.mu.Unlock()
-		conn.Close()
-		return
-	}
-	s.mu.Unlock()
-
 	// Finish handshaking (HTTP2)
 	st := s.newHTTP2Transport(conn, authInfo)
 	if st == nil {
@@ -742,6 +761,9 @@
 // traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
 // If tracing is not enabled, it returns nil.
 func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
+	if !EnableTracing {
+		return nil
+	}
 	tr, ok := trace.FromContext(stream.Context())
 	if !ok {
 		return nil
@@ -760,27 +782,27 @@
 	return trInfo
 }
 
-func (s *Server) addConn(c io.Closer) bool {
+func (s *Server) addConn(st transport.ServerTransport) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if s.conns == nil {
-		c.Close()
+		st.Close()
 		return false
 	}
 	if s.drain {
 		// Transport added after we drained our existing conns: drain it
 		// immediately.
-		c.(transport.ServerTransport).Drain()
+		st.Drain()
 	}
-	s.conns[c] = true
+	s.conns[st] = true
 	return true
 }
 
-func (s *Server) removeConn(c io.Closer) {
+func (s *Server) removeConn(st transport.ServerTransport) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if s.conns != nil {
-		delete(s.conns, c)
+		delete(s.conns, st)
 		s.cv.Broadcast()
 	}
 }
@@ -952,10 +974,11 @@
 		}
 		if sh != nil {
 			sh.HandleRPC(stream.Context(), &stats.InPayload{
-				RecvTime: time.Now(),
-				Payload:  v,
-				Data:     d,
-				Length:   len(d),
+				RecvTime:   time.Now(),
+				Payload:    v,
+				WireLength: payInfo.wireLength,
+				Data:       d,
+				Length:     len(d),
 			})
 		}
 		if binlog != nil {
@@ -1051,7 +1074,7 @@
 	// TODO: Should we be logging if writing status failed here, like above?
 	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
 	// error or allow the stats handler to see it?
-	err = t.WriteStatus(stream, status.New(codes.OK, ""))
+	err = t.WriteStatus(stream, statusOK)
 	if binlog != nil {
 		binlog.Log(&binarylog.ServerTrailer{
 			Trailer: stream.Trailer(),
@@ -1209,7 +1232,7 @@
 		ss.trInfo.tr.LazyLog(stringer("OK"), false)
 		ss.mu.Unlock()
 	}
-	err = t.WriteStatus(ss.s, status.New(codes.OK, ""))
+	err = t.WriteStatus(ss.s, statusOK)
 	if ss.binlog != nil {
 		ss.binlog.Log(&binarylog.ServerTrailer{
 			Trailer: ss.s.Trailer(),
@@ -1326,15 +1349,11 @@
 // pending RPCs on the client side will get notified by connection
 // errors.
 func (s *Server) Stop() {
-	s.quitOnce.Do(func() {
-		close(s.quit)
-	})
+	s.quit.Fire()
 
 	defer func() {
 		s.serveWG.Wait()
-		s.doneOnce.Do(func() {
-			close(s.done)
-		})
+		s.done.Fire()
 	}()
 
 	s.channelzRemoveOnce.Do(func() {
@@ -1371,15 +1390,8 @@
 // accepting new connections and RPCs and blocks until all the pending RPCs are
 // finished.
 func (s *Server) GracefulStop() {
-	s.quitOnce.Do(func() {
-		close(s.quit)
-	})
-
-	defer func() {
-		s.doneOnce.Do(func() {
-			close(s.done)
-		})
-	}()
+	s.quit.Fire()
+	defer s.done.Fire()
 
 	s.channelzRemoveOnce.Do(func() {
 		if channelz.IsOn() {
@@ -1397,8 +1409,8 @@
 	}
 	s.lis = nil
 	if !s.drain {
-		for c := range s.conns {
-			c.(transport.ServerTransport).Drain()
+		for st := range s.conns {
+			st.Drain()
 		}
 		s.drain = true
 	}
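
server.go replaces the quit/done channels and their sync.Once pairs with grpcsync.Event, which packages the same close-once semantics behind Fire/Done/HasFired. A minimal sketch of such an event type (illustrative; the vendored grpcsync.Event also reports from Fire whether the call was the one that fired it):

    package main

    import (
        "fmt"
        "sync"
    )

    // event is a level-triggered signal: Fire may be called any number of
    // times, but the channel is closed exactly once.
    type event struct {
        once sync.Once
        c    chan struct{}
    }

    func newEvent() *event { return &event{c: make(chan struct{})} }

    func (e *event) Fire()                 { e.once.Do(func() { close(e.c) }) }
    func (e *event) Done() <-chan struct{} { return e.c }

    func (e *event) HasFired() bool {
        select {
        case <-e.c:
            return true
        default:
            return false
        }
    }

    func main() {
        quit := newEvent()
        quit.Fire()
        quit.Fire() // idempotent: no double-close panic
        fmt.Println("fired:", quit.HasFired())
        <-quit.Done() // already closed, returns immediately
    }
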
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 1c52274..d0787f1 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -25,8 +25,11 @@
 	"strings"
 	"time"
 
+	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/serviceconfig"
 )
 
 const maxInt = int(^uint(0) >> 1)
@@ -61,6 +64,11 @@
 	retryPolicy *retryPolicy
 }
 
+type lbConfig struct {
+	name string
+	cfg  serviceconfig.LoadBalancingConfig
+}
+
 // ServiceConfig is provided by the service provider and contains parameters for how
 // clients that connect to the service should behave.
 //
@@ -68,10 +76,18 @@
 // through name resolver, as specified here
 // https://github.com/grpc/grpc/blob/master/doc/service_config.md
 type ServiceConfig struct {
-	// LB is the load balancer the service providers recommends. The balancer specified
-	// via grpc.WithBalancer will override this.
+	serviceconfig.Config
+
+	// LB is the load balancer the service provider recommends. The balancer
+	// specified via grpc.WithBalancer will override this.  This is deprecated;
+	// lbConfig is preferred.  If lbConfig and LB are both present, lbConfig
+	// will be used.
 	LB *string
 
+	// lbConfig is the service config's load balancing configuration.  If
+	// lbConfig and LB are both present, lbConfig will be used.
+	lbConfig *lbConfig
+
 	// Methods contains a map for the methods in this service.  If there is an
 	// exact match for a method (i.e. /service/method) in the map, use the
 	// corresponding MethodConfig.  If there's no exact match, look for the
@@ -233,15 +249,27 @@
 	RetryPolicy             *jsonRetryPolicy
 }
 
+type loadBalancingConfig map[string]json.RawMessage
+
 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
 type jsonSC struct {
 	LoadBalancingPolicy *string
+	LoadBalancingConfig *[]loadBalancingConfig
 	MethodConfig        *[]jsonMC
 	RetryThrottling     *retryThrottlingPolicy
 	HealthCheckConfig   *healthCheckConfig
 }
 
+func init() {
+	internal.ParseServiceConfig = func(sc string) (interface{}, error) {
+		return parseServiceConfig(sc)
+	}
+}
+
 func parseServiceConfig(js string) (*ServiceConfig, error) {
+	if len(js) == 0 {
+		return nil, fmt.Errorf("no JSON service config provided")
+	}
 	var rsc jsonSC
 	err := json.Unmarshal([]byte(js), &rsc)
 	if err != nil {
@@ -255,10 +283,38 @@
 		healthCheckConfig: rsc.HealthCheckConfig,
 		rawJSONString:     js,
 	}
+	if rsc.LoadBalancingConfig != nil {
+		for i, lbcfg := range *rsc.LoadBalancingConfig {
+			if len(lbcfg) != 1 {
+				err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+				grpclog.Warningf(err.Error())
+				return nil, err
+			}
+			var name string
+			var jsonCfg json.RawMessage
+			for name, jsonCfg = range lbcfg {
+			}
+			builder := balancer.Get(name)
+			if builder == nil {
+				continue
+			}
+			sc.lbConfig = &lbConfig{name: name}
+			if parser, ok := builder.(balancer.ConfigParser); ok {
+				var err error
+				sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
+				if err != nil {
+					return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+				}
+			} else if string(jsonCfg) != "{}" {
+				grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+			}
+			break
+		}
+	}
+
 	if rsc.MethodConfig == nil {
 		return &sc, nil
 	}
-
 	for _, m := range *rsc.MethodConfig {
 		if m.Name == nil {
 			continue
@@ -299,11 +355,11 @@
 	}
 
 	if sc.retryThrottling != nil {
-		if sc.retryThrottling.MaxTokens <= 0 ||
-			sc.retryThrottling.MaxTokens > 1000 ||
-			sc.retryThrottling.TokenRatio <= 0 {
-			// Illegal throttling config; disable throttling.
-			sc.retryThrottling = nil
+		if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
+			return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
+		}
+		if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
+			return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) must be greater than 0", tr)
 		}
 	}
 	return &sc, nil
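
The loadBalancingConfig list parsed above is a sequence of single-key JSON objects, and the loop picks the first entry whose policy has a registered builder; ranging over a verified one-entry map is how the single key/value pair is extracted. A self-contained sketch of that selection (registered is a stand-in for balancer.Get):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // loadBalancingConfig matches the wire shape: each list entry is a
    // single-key object, {"policyName": { ...config... }}.
    type loadBalancingConfig map[string]json.RawMessage

    func main() {
        raw := `[{"made_up_policy": {}}, {"pick_first": {}}]`
        var candidates []loadBalancingConfig
        if err := json.Unmarshal([]byte(raw), &candidates); err != nil {
            panic(err)
        }

        registered := map[string]bool{"pick_first": true} // stand-in for balancer.Get

        for _, entry := range candidates {
            var name string
            var cfg json.RawMessage
            // Ranging over a (verified) one-entry map extracts its only
            // key/value pair.
            for name, cfg = range entry {
            }
            if !registered[name] {
                continue // unknown policy: try the next candidate
            }
            fmt.Printf("selected policy %q with config %s\n", name, cfg)
            break
        }
    }
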
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
new file mode 100644
index 0000000..53b2787
--- /dev/null
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig defines types and methods for operating on gRPC
+// service configs.
+//
+// This package is EXPERIMENTAL.
+package serviceconfig
+
+import (
+	"google.golang.org/grpc/internal"
+)
+
+// Config represents an opaque data structure holding a service config.
+type Config interface {
+	isConfig()
+}
+
+// LoadBalancingConfig represents an opaque data structure holding a load
+// balancer config.
+type LoadBalancingConfig interface {
+	isLoadBalancingConfig()
+}
+
+// Parse parses the JSON service config provided into an internal form or
+// returns an error if the config is invalid.
+func Parse(ServiceConfigJSON string) (Config, error) {
+	c, err := internal.ParseServiceConfig(ServiceConfigJSON)
+	if err != nil {
+		return nil, err
+	}
+	return c.(Config), err
+}
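
With the new serviceconfig package, a config can be parsed outside grpc and handed around as an opaque serviceconfig.Config (for example via resolver.State, as changed above). A usage sketch against the API as introduced in this vendored version (the JSON is an illustrative example):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/serviceconfig"
    )

    func main() {
        // Parse returns an opaque serviceconfig.Config suitable for
        // resolver.State.ServiceConfig; the concrete type lives inside
        // the grpc package.
        cfg, err := serviceconfig.Parse(`{"loadBalancingPolicy": "round_robin"}`)
        if err != nil {
            panic(err)
        }
        fmt.Printf("parsed service config: %T\n", cfg)
    }
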
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index ed36681..a1348e9 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -36,8 +36,15 @@
 	"github.com/golang/protobuf/ptypes"
 	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal"
 )
 
+func init() {
+	internal.StatusRawProto = statusRawProto
+}
+
+func statusRawProto(s *Status) *spb.Status { return s.s }
+
 // statusError is an alias of a status proto.  It implements error and Status,
 // and a nil statusError should never be returned by this package.
 type statusError spb.Status
@@ -51,6 +58,17 @@
 	return &Status{s: (*spb.Status)(se)}
 }
 
+// Is implements future errors.Is functionality.
+// A statusError is equivalent if the code and message are identical.
+func (se *statusError) Is(target error) bool {
+	tse, ok := target.(*statusError)
+	if !ok {
+		return false
+	}
+
+	return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
+}
+
 // Status represents an RPC status code, message, and details.  It is immutable
 // and should be created with New, Newf, or FromProto.
 type Status struct {
@@ -125,7 +143,7 @@
 // Status is returned with codes.Unknown and the original error message.
 func FromError(err error) (s *Status, ok bool) {
 	if err == nil {
-		return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
+		return nil, true
 	}
 	if se, ok := err.(interface {
 		GRPCStatus() *Status
@@ -199,7 +217,7 @@
 func FromContextError(err error) *Status {
 	switch err {
 	case nil:
-		return New(codes.OK, "")
+		return nil
 	case context.DeadlineExceeded:
 		return New(codes.DeadlineExceeded, err.Error())
 	case context.Canceled:
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 6e2bf51..134a624 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -30,7 +30,6 @@
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/balancerload"
@@ -245,7 +244,7 @@
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 		ctx = trace.NewContext(ctx, trInfo.tr)
 	}
-	ctx = newContextWithRPCInfo(ctx, c.failFast)
+	ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
 	sh := cc.dopts.copts.StatsHandler
 	var beginTime time.Time
 	if sh != nil {
@@ -328,13 +327,23 @@
 	return cs, nil
 }
 
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error {
-	cs.attempt = &csAttempt{
+// newAttemptLocked creates a new attempt with a transport.
+// If it succeeds, then it replaces clientStream's attempt with this new attempt.
+func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
+	newAttempt := &csAttempt{
 		cs:           cs,
 		dc:           cs.cc.dopts.dc,
 		statsHandler: sh,
 		trInfo:       trInfo,
 	}
+	defer func() {
+		if retErr != nil {
+			// This attempt is not set in the clientStream, so its finish method
+			// won't be called. Call it here for stats and trace in case they
+			// are not nil.
+			newAttempt.finish(retErr)
+		}
+	}()
 
 	if err := cs.ctx.Err(); err != nil {
 		return toRPCErr(err)
@@ -346,8 +355,9 @@
 	if trInfo != nil {
 		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
 	}
-	cs.attempt.t = t
-	cs.attempt.done = done
+	newAttempt.t = t
+	newAttempt.done = done
+	cs.attempt = newAttempt
 	return nil
 }
 
@@ -396,11 +406,18 @@
 	serverHeaderBinlogged bool
 
 	mu                      sync.Mutex
-	firstAttempt            bool       // if true, transparent retry is valid
-	numRetries              int        // exclusive of transparent retry attempt(s)
-	numRetriesSincePushback int        // retries since pushback; to reset backoff
-	finished                bool       // TODO: replace with atomic cmpxchg or sync.Once?
-	attempt                 *csAttempt // the active client stream attempt
+	firstAttempt            bool // if true, transparent retry is valid
+	numRetries              int  // exclusive of transparent retry attempt(s)
+	numRetriesSincePushback int  // retries since pushback; to reset backoff
+	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
+	// attempt is the active client stream attempt.
+	// The only place where it is written is the newAttemptLocked method, and that method never writes nil.
+	// So, attempt can be nil only inside the newClientStream function when the clientStream is first created.
+	// One of the first things done after the clientStream's creation is to call newAttemptLocked, which
+	// either assigns a non-nil value to the attempt or returns an error. If an error is returned from
+	// newAttemptLocked, then newClientStream calls finish on the clientStream and returns. So, the finish
+	// method is the only place where we need to check if the attempt is nil.
+	attempt *csAttempt
 	// TODO(hedging): hedging will have multiple attempts simultaneously.
 	committed  bool                       // active attempt committed for retry?
 	buffer     []func(a *csAttempt) error // operations to replay on retry
@@ -458,8 +475,8 @@
 	if cs.attempt.s != nil {
 		<-cs.attempt.s.Done()
 	}
-	if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
-		// First attempt, wait-for-ready, stream unprocessed: transparently retry.
+	if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+		// First attempt, stream unprocessed: transparently retry.
 		cs.firstAttempt = false
 		return nil
 	}
@@ -677,15 +694,13 @@
 	if !cs.desc.ClientStreams {
 		cs.sentLast = true
 	}
-	data, err := encode(cs.codec, m)
+
+	// load hdr, payload, data
+	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
 	if err != nil {
 		return err
 	}
-	compData, err := compress(data, cs.cp, cs.comp)
-	if err != nil {
-		return err
-	}
-	hdr, payload := msgHeader(data, compData)
+
 	// TODO(dfawley): should we be checking len(data) instead?
 	if len(payload) > *cs.callInfo.maxSendMessageSize {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
@@ -808,11 +823,11 @@
 	}
 	if cs.attempt != nil {
 		cs.attempt.finish(err)
-	}
-	// after functions all rely upon having a stream.
-	if cs.attempt.s != nil {
-		for _, o := range cs.opts {
-			o.after(cs.callInfo)
+		// after functions all rely upon having a stream.
+		if cs.attempt.s != nil {
+			for _, o := range cs.opts {
+				o.after(cs.callInfo)
+			}
 		}
 	}
 	cs.cancel()
@@ -967,19 +982,18 @@
 	a.mu.Unlock()
 }
 
-func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) {
-	ac.mu.Lock()
-	if ac.transport != t {
-		ac.mu.Unlock()
-		return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
-	}
-	// transition to CONNECTING state when an attempt starts
-	if ac.state != connectivity.Connecting {
-		ac.updateConnectivityState(connectivity.Connecting)
-		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-	}
-	ac.mu.Unlock()
-
+// newClientStream creates a ClientStream with the specified transport, on the
+// given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid races, the transport is specified separately,
+// instead of using ac.transport.
+//
+// Main difference between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
 	if t == nil {
 		// TODO: return RPC error here?
 		return nil, errors.New("transport provided is nil")
@@ -987,14 +1001,6 @@
 	// defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
 	c := &callInfo{}
 
-	for _, o := range opts {
-		if err := o.before(c); err != nil {
-			return nil, toRPCErr(err)
-		}
-	}
-	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
-	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
-
 	// Possible context leak:
 	// The cancel function for the child context we create will only be called
 	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
@@ -1007,6 +1013,13 @@
 		}
 	}()
 
+	for _, o := range opts {
+		if err := o.before(c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
 	if err := setCallInfoCodec(c); err != nil {
 		return nil, err
 	}
@@ -1039,6 +1052,7 @@
 		callHdr.Creds = c.creds
 	}
 
+	// Use a special addrConnStream to avoid retry.
 	as := &addrConnStream{
 		callHdr:  callHdr,
 		ac:       ac,
@@ -1150,15 +1164,13 @@
 	if !as.desc.ClientStreams {
 		as.sentLast = true
 	}
-	data, err := encode(as.codec, m)
+
+	// load hdr, payload, data
+	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
 	if err != nil {
 		return err
 	}
-	compData, err := compress(data, as.cp, as.comp)
-	if err != nil {
-		return err
-	}
-	hdr, payld := msgHeader(data, compData)
+
 	// TODO(dfawley): should we be checking len(data) instead?
 	if len(payld) > *as.callInfo.maxSendMessageSize {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
@@ -1395,15 +1407,13 @@
 			ss.t.IncrMsgSent()
 		}
 	}()
-	data, err := encode(ss.codec, m)
+
+	// load hdr, payload, data
+	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
 	if err != nil {
 		return err
 	}
-	compData, err := compress(data, ss.cp, ss.comp)
-	if err != nil {
-		return err
-	}
-	hdr, payload := msgHeader(data, compData)
+
 	// TODO(dfawley): should we be checking len(data) instead?
 	if len(payload) > ss.maxSendMessageSize {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
@@ -1496,3 +1506,24 @@
 func MethodFromServerStream(stream ServerStream) (string, bool) {
 	return Method(stream.Context())
 }
+
+// prepareMsg returns the hdr, payload and data using the codec and
+// compressors passed in, or takes them directly from the passed
+// PreparedMsg.
+func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+	if preparedMsg, ok := m.(*PreparedMsg); ok {
+		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+	}
+	// The input interface is not a prepared msg.
+	// Marshal and compress the data at this point
+	data, err = encode(codec, m)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	compData, err := compress(data, cp, comp)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	hdr, payload = msgHeader(data, compData)
+	return hdr, payload, data, nil
+}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 092e088..5411a73 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.20.1"
+const Version = "1.23.0"