Merge pull request #215 from thaJeztah/19.03_backport_buildkit_fixes

[19.03 backport] BuildKit fixes

This backport contains the following changes:

- builder-next/containerimage: stop the pull progress writer as soon as
  DownloadManager.Download returns, so progress output is also terminated
  on the error path instead of being skipped by the early return.
- builder-next/snapshot: add a Name() method (returning "default") to the
  snapshotter adapter.
- vendor: update github.com/moby/buildkit and related dependencies
  (containerd, continuity, console, gogo/googleapis v1.2.0, gogo/protobuf
  v1.2.1, golang.org/x/crypto, google.golang.org/grpc v1.12.2 -> v1.20.1),
  including the regenerated vendored protobuf code.
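For illustration, a minimal, self-contained Go sketch of the ordering fixed in
pull.go. The helper names (startProgress, download, pullLayers) are stand-ins
for this example only, not the actual moby/BuildKit APIs:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // startProgress returns a stop function, mimicking the stopProgress
    // callback used around the layer download (illustrative only).
    func startProgress() (stop func()) {
    	fmt.Println("progress: started")
    	return func() { fmt.Println("progress: stopped") }
    }

    // download stands in for DownloadManager.Download; it may fail.
    func download(fail bool) error {
    	if fail {
    		return errors.New("layer download failed")
    	}
    	return nil
    }

    // pullLayers mirrors the fixed ordering: the progress writer is stopped
    // as soon as the download returns, on both the success and error paths.
    func pullLayers(fail bool) error {
    	stopProgress := startProgress()
    	err := download(fail)
    	stopProgress() // fixed: runs before the early return below
    	if err != nil {
    		return err
    	}
    	return nil
    }

    func main() {
    	fmt.Println("error:", pullLayers(true))
    }

With this ordering, pullLayers(true) prints both the start and stop messages
before returning the error; with the previous ordering (stop after the error
check), the stop call would have been skipped whenever the download failed.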
diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go
index dd9f5cf..af8f868 100644
--- a/builder/builder-next/adapters/containerimage/pull.go
+++ b/builder/builder-next/adapters/containerimage/pull.go
@@ -545,10 +545,10 @@
 
 	r := image.NewRootFS()
 	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
+	stopProgress()
 	if err != nil {
 		return nil, err
 	}
-	stopProgress()
 
 	ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
 	release()
diff --git a/builder/builder-next/adapters/snapshot/snapshot.go b/builder/builder-next/adapters/snapshot/snapshot.go
index 314c99c..93af8f3 100644
--- a/builder/builder-next/adapters/snapshot/snapshot.go
+++ b/builder/builder-next/adapters/snapshot/snapshot.go
@@ -74,6 +74,10 @@
 	return s, nil
 }
 
+func (s *snapshotter) Name() string {
+	return "default"
+}
+
 func (s *snapshotter) IdentityMapping() *idtools.IdentityMapping {
 	return nil
 }
diff --git a/vendor.conf b/vendor.conf
index ae99800..d6b4309 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -27,7 +27,7 @@
 golang.org/x/sync                                   e225da77a7e68af35c70ccbf71af2b83e6acac3c
 
 # buildkit
-github.com/moby/buildkit                            8818c67cff663befa7b70f21454e340f71616581
+github.com/moby/buildkit                            f238f1efb04f00bf0cc147141fda9ddb55c8bc49
 github.com/tonistiigi/fsutil                        3bbb99cdbd76619ab717299830c60f6f2a533a6b
 github.com/grpc-ecosystem/grpc-opentracing          8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go               1361b9cd60be79c4c3a7fa9841b3c132e40066a7
@@ -73,7 +73,7 @@
 # get go-zfs packages
 github.com/mistifyio/go-zfs                         f784269be439d704d3dfa1906f45dd848fed2beb
 
-google.golang.org/grpc                              7a6a684ca69eb4cae85ad0a484f2e531598c047b # v1.12.2
+google.golang.org/grpc                              25c4f928eaa6d96443009bd842389fb4fa48664e # v1.20.1
 
 # The version of runc should match the version that is used by the containerd
 # version that is used. If you need to update runc, open a pull request in
@@ -119,23 +119,23 @@
 google.golang.org/genproto                          694d95ba50e67b2e363f3483057db5d4910c18f9
 
 # containerd
-github.com/containerd/containerd                    ceba56893a76f22cf0126c46d835c80fb3833408
+github.com/containerd/containerd                    3a3f0aac8819165839a41fee77a4f4ac8b103097
 github.com/containerd/fifo                          a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c
-github.com/containerd/continuity                    004b46473808b3e7a4a3049c20e4376c91eb966d
+github.com/containerd/continuity                    aaeac12a7ffcd198ae25440a9dff125c2e2703a7
 github.com/containerd/cgroups                       4994991857f9b0ae8dc439551e8bebdbb4bf66c1
-github.com/containerd/console                       c12b1e7919c14469339a5d38f2f8ed9b64a9de23
+github.com/containerd/console                       0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
 github.com/containerd/go-runc                       7d11b49dc0769f6dbb0d1b19f3d48524d1bad9ad
 github.com/containerd/typeurl                       2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d
 github.com/containerd/ttrpc                         f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
-github.com/gogo/googleapis                          08a7655d27152912db7aaf4f983275eaf8d128ef
+github.com/gogo/googleapis                          d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0
 
 # cluster
 github.com/docker/swarmkit                          59163bf75df38489d4a10392265d27156dc473c5
-github.com/gogo/protobuf                            4cbf7e384e768b4e01799441fdf2a706a5635ae7 # v1.2.0
+github.com/gogo/protobuf                            ba06b47c162d49f2af050fb4c75bcbc86a159d5c # v1.2.1
 github.com/cloudflare/cfssl                         5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2
 github.com/fernet/fernet-go                         1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
 github.com/google/certificate-transparency-go       37a384cd035e722ea46e55029093e26687138edf # v1.0.20
-golang.org/x/crypto                                 38d8ce5564a5b71b2e3a00553993f1b9a7ae852f
+golang.org/x/crypto                                 88737f569e3a9c7ab309cdc09a07fe7fc87233c3
 golang.org/x/time                                   fbb02b2291d28baffd63558aa44b4b56f178d650
 github.com/hashicorp/go-memdb                       cb9a474f84cc5e41b273b20c6927680b2a8776ad
 github.com/hashicorp/go-immutable-radix             826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE
index 261eeb9..584149b 100644
--- a/vendor/github.com/containerd/console/LICENSE
+++ b/vendor/github.com/containerd/console/LICENSE
@@ -1,6 +1,7 @@
+
                                  Apache License
                            Version 2.0, January 2004
-                        http://www.apache.org/licenses/
+                        https://www.apache.org/licenses/
 
    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
@@ -175,24 +176,13 @@
 
    END OF TERMS AND CONDITIONS
 
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
+   Copyright The containerd Authors
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
 
-       http://www.apache.org/licenses/LICENSE-2.0
+       https://www.apache.org/licenses/LICENSE-2.0
 
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md
index 4c56d9d..5392fda 100644
--- a/vendor/github.com/containerd/console/README.md
+++ b/vendor/github.com/containerd/console/README.md
@@ -15,3 +15,13 @@
 ws, err := current.Size()
 current.Resize(ws)
 ```
+
+## Project details
+
+console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/containerd/api/events/container.pb.go b/vendor/github.com/containerd/containerd/api/events/container.pb.go
index c89d97f..2d0d1fa 100644
--- a/vendor/github.com/containerd/containerd/api/events/container.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/container.pb.go
@@ -1,60 +1,19 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/events/container.proto
 
-/*
-	Package events is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/events/container.proto
-		github.com/containerd/containerd/api/events/content.proto
-		github.com/containerd/containerd/api/events/image.proto
-		github.com/containerd/containerd/api/events/namespace.proto
-		github.com/containerd/containerd/api/events/snapshot.proto
-		github.com/containerd/containerd/api/events/task.proto
-
-	It has these top-level messages:
-		ContainerCreate
-		ContainerUpdate
-		ContainerDelete
-		ContentDelete
-		ImageCreate
-		ImageUpdate
-		ImageDelete
-		NamespaceCreate
-		NamespaceUpdate
-		NamespaceDelete
-		SnapshotPrepare
-		SnapshotCommit
-		SnapshotRemove
-		TaskCreate
-		TaskStart
-		TaskDelete
-		TaskIO
-		TaskExit
-		TaskOOM
-		TaskExecAdded
-		TaskExecStarted
-		TaskPaused
-		TaskResumed
-		TaskCheckpointed
-*/
 package events
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/gogo/protobuf/types"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
-
-import typeurl "github.com/containerd/typeurl"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	fmt "fmt"
+	github_com_containerd_typeurl "github.com/containerd/typeurl"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	types "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -68,52 +27,209 @@
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type ContainerCreate struct {
-	ID      string                   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Image   string                   `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
-	Runtime *ContainerCreate_Runtime `protobuf:"bytes,3,opt,name=runtime" json:"runtime,omitempty"`
+	ID                   string                   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Image                string                   `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
+	Runtime              *ContainerCreate_Runtime `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
 }
 
-func (m *ContainerCreate) Reset()                    { *m = ContainerCreate{} }
-func (*ContainerCreate) ProtoMessage()               {}
-func (*ContainerCreate) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{0} }
+func (m *ContainerCreate) Reset()      { *m = ContainerCreate{} }
+func (*ContainerCreate) ProtoMessage() {}
+func (*ContainerCreate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_0d1f05b8626f83ea, []int{0}
+}
+func (m *ContainerCreate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ContainerCreate.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ContainerCreate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerCreate.Merge(m, src)
+}
+func (m *ContainerCreate) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerCreate) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerCreate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerCreate proto.InternalMessageInfo
 
 type ContainerCreate_Runtime struct {
-	Name    string               `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Options *google_protobuf.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	Name                 string     `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Options              *types.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
 }
 
 func (m *ContainerCreate_Runtime) Reset()      { *m = ContainerCreate_Runtime{} }
 func (*ContainerCreate_Runtime) ProtoMessage() {}
 func (*ContainerCreate_Runtime) Descriptor() ([]byte, []int) {
-	return fileDescriptorContainer, []int{0, 0}
+	return fileDescriptor_0d1f05b8626f83ea, []int{0, 0}
 }
+func (m *ContainerCreate_Runtime) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerCreate_Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ContainerCreate_Runtime.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ContainerCreate_Runtime) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerCreate_Runtime.Merge(m, src)
+}
+func (m *ContainerCreate_Runtime) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerCreate_Runtime) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerCreate_Runtime.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerCreate_Runtime proto.InternalMessageInfo
 
 type ContainerUpdate struct {
-	ID          string            `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Image       string            `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
-	Labels      map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	SnapshotKey string            `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"`
+	ID                   string            `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Image                string            `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
+	Labels               map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	SnapshotKey          string            `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *ContainerUpdate) Reset()                    { *m = ContainerUpdate{} }
-func (*ContainerUpdate) ProtoMessage()               {}
-func (*ContainerUpdate) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{1} }
+func (m *ContainerUpdate) Reset()      { *m = ContainerUpdate{} }
+func (*ContainerUpdate) ProtoMessage() {}
+func (*ContainerUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_0d1f05b8626f83ea, []int{1}
+}
+func (m *ContainerUpdate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ContainerUpdate.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ContainerUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerUpdate.Merge(m, src)
+}
+func (m *ContainerUpdate) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerUpdate proto.InternalMessageInfo
 
 type ContainerDelete struct {
-	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ContainerDelete) Reset()                    { *m = ContainerDelete{} }
-func (*ContainerDelete) ProtoMessage()               {}
-func (*ContainerDelete) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{2} }
+func (m *ContainerDelete) Reset()      { *m = ContainerDelete{} }
+func (*ContainerDelete) ProtoMessage() {}
+func (*ContainerDelete) Descriptor() ([]byte, []int) {
+	return fileDescriptor_0d1f05b8626f83ea, []int{2}
+}
+func (m *ContainerDelete) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ContainerDelete.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ContainerDelete) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerDelete.Merge(m, src)
+}
+func (m *ContainerDelete) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerDelete) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerDelete.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerDelete proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*ContainerCreate)(nil), "containerd.events.ContainerCreate")
 	proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.events.ContainerCreate.Runtime")
 	proto.RegisterType((*ContainerUpdate)(nil), "containerd.events.ContainerUpdate")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.events.ContainerUpdate.LabelsEntry")
 	proto.RegisterType((*ContainerDelete)(nil), "containerd.events.ContainerDelete")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/events/container.proto", fileDescriptor_0d1f05b8626f83ea)
+}
+
+var fileDescriptor_0d1f05b8626f83ea = []byte{
+	// 413 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x0a, 0xd3, 0x30,
+	0x18, 0xc7, 0x97, 0x76, 0x6e, 0x98, 0x0a, 0x6a, 0x18, 0x52, 0x7b, 0xa8, 0x73, 0xa7, 0xe9, 0x21,
+	0x85, 0x7a, 0x51, 0x77, 0xd1, 0x6d, 0x0a, 0xa2, 0x82, 0x14, 0x84, 0xe1, 0x45, 0xd2, 0x35, 0xeb,
+	0x82, 0x6d, 0x52, 0xda, 0x74, 0xd0, 0x9b, 0x8f, 0xe2, 0xe3, 0xec, 0xe8, 0xc1, 0x83, 0x27, 0x71,
+	0x05, 0xdf, 0xc0, 0x07, 0x90, 0x26, 0xeb, 0x56, 0x14, 0x95, 0x9d, 0xfa, 0xcf, 0xd7, 0xff, 0x3f,
+	0xdf, 0xf7, 0xfb, 0x08, 0x9c, 0xc5, 0x4c, 0x6e, 0xcb, 0x10, 0xaf, 0x45, 0xea, 0xad, 0x05, 0x97,
+	0x84, 0x71, 0x9a, 0x47, 0x5d, 0x49, 0x32, 0xe6, 0xd1, 0x1d, 0xe5, 0xb2, 0x38, 0x57, 0x71, 0x96,
+	0x0b, 0x29, 0xd0, 0xcd, 0xb3, 0x0d, 0x6b, 0x8b, 0x73, 0x3b, 0x16, 0x22, 0x4e, 0xa8, 0xa7, 0x0c,
+	0x61, 0xb9, 0xf1, 0x08, 0xaf, 0xb4, 0xdb, 0x19, 0xc5, 0x22, 0x16, 0x4a, 0x7a, 0x8d, 0x3a, 0x56,
+	0x9f, 0xfc, 0x77, 0x80, 0xd3, 0x55, 0x59, 0x52, 0xc6, 0x8c, 0x7b, 0x1b, 0x46, 0x93, 0x28, 0x23,
+	0x72, 0xab, 0x6f, 0x98, 0x7c, 0x01, 0xf0, 0xfa, 0xa2, 0xb5, 0x2f, 0x72, 0x4a, 0x24, 0x45, 0xb7,
+	0xa0, 0xc1, 0x22, 0x1b, 0x8c, 0xc1, 0xf4, 0xea, 0x7c, 0x50, 0x7f, 0xbb, 0x63, 0xbc, 0x58, 0x06,
+	0x06, 0x8b, 0xd0, 0x08, 0x5e, 0x61, 0x29, 0x89, 0xa9, 0x6d, 0x34, 0xbf, 0x02, 0x7d, 0x40, 0x4b,
+	0x38, 0xcc, 0x4b, 0x2e, 0x59, 0x4a, 0x6d, 0x73, 0x0c, 0xa6, 0x96, 0x7f, 0x1f, 0xff, 0x41, 0x86,
+	0x7f, 0x6b, 0x81, 0x03, 0x9d, 0x08, 0xda, 0xa8, 0xf3, 0x1a, 0x0e, 0x8f, 0x35, 0x84, 0x60, 0x9f,
+	0x93, 0x94, 0xea, 0x01, 0x02, 0xa5, 0x11, 0x86, 0x43, 0x91, 0x49, 0x26, 0x78, 0xa1, 0x9a, 0x5b,
+	0xfe, 0x08, 0xeb, 0x5d, 0xe1, 0x16, 0x10, 0x3f, 0xe5, 0x55, 0xd0, 0x9a, 0x26, 0x3f, 0xba, 0x58,
+	0x6f, 0xb3, 0xe8, 0x72, 0xac, 0xe7, 0x70, 0x90, 0x90, 0x90, 0x26, 0x85, 0x6d, 0x8e, 0xcd, 0xa9,
+	0xe5, 0xe3, 0x7f, 0x51, 0xe9, 0x0e, 0xf8, 0x95, 0x0a, 0x3c, 0xe3, 0x32, 0xaf, 0x82, 0x63, 0x1a,
+	0xdd, 0x85, 0xd7, 0x0a, 0x4e, 0xb2, 0x62, 0x2b, 0xe4, 0xfb, 0x0f, 0xb4, 0xb2, 0xfb, 0xaa, 0x89,
+	0xd5, 0xd6, 0x5e, 0xd2, 0xca, 0x79, 0x04, 0xad, 0x4e, 0x12, 0xdd, 0x80, 0x66, 0x63, 0xd4, 0xf8,
+	0x8d, 0x6c, 0x26, 0xdc, 0x91, 0xa4, 0x3c, 0x4d, 0xa8, 0x0e, 0x8f, 0x8d, 0x87, 0x60, 0x72, 0xaf,
+	0x83, 0xb9, 0xa4, 0x09, 0xfd, 0x3b, 0xe6, 0xfc, 0xcd, 0xfe, 0xe0, 0xf6, 0xbe, 0x1e, 0xdc, 0xde,
+	0xc7, 0xda, 0x05, 0xfb, 0xda, 0x05, 0x9f, 0x6b, 0x17, 0x7c, 0xaf, 0x5d, 0xf0, 0xe9, 0xa7, 0x0b,
+	0xde, 0xf9, 0x17, 0x3c, 0xe5, 0x99, 0xfe, 0xac, 0xc0, 0xca, 0x08, 0x07, 0x6a, 0xff, 0x0f, 0x7e,
+	0x05, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x09, 0xe0, 0xd6, 0x0b, 0x03, 0x00, 0x00,
+}
+
 // Field returns the value for the given fieldpath as a string, if defined.
 // If the value is not defined, the second value will be false.
 func (m *ContainerCreate) Field(fieldpath []string) (string, bool) {
@@ -153,7 +269,7 @@
 	case "name":
 		return string(m.Name), len(m.Name) > 0
 	case "options":
-		decoded, err := typeurl.UnmarshalAny(m.Options)
+		decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Options)
 		if err != nil {
 			return "", false
 		}
@@ -243,6 +359,9 @@
 		}
 		i += n1
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -277,6 +396,9 @@
 		}
 		i += n2
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -330,6 +452,9 @@
 		i = encodeVarintContainer(dAtA, i, uint64(len(m.SnapshotKey)))
 		i += copy(dAtA[i:], m.SnapshotKey)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -354,6 +479,9 @@
 		i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -367,6 +495,9 @@
 	return offset + 1
 }
 func (m *ContainerCreate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -381,10 +512,16 @@
 		l = m.Runtime.Size()
 		n += 1 + l + sovContainer(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ContainerCreate_Runtime) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -395,10 +532,16 @@
 		l = m.Options.Size()
 		n += 1 + l + sovContainer(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ContainerUpdate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -421,16 +564,25 @@
 	if l > 0 {
 		n += 1 + l + sovContainer(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ContainerDelete) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovContainer(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -455,6 +607,7 @@
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
 		`Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "ContainerCreate_Runtime", "ContainerCreate_Runtime", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -465,7 +618,8 @@
 	}
 	s := strings.Join([]string{`&ContainerCreate_Runtime{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -478,7 +632,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -489,6 +643,7 @@
 		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
 		`SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -499,6 +654,7 @@
 	}
 	s := strings.Join([]string{`&ContainerDelete{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -526,7 +682,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -554,7 +710,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -564,6 +720,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -583,7 +742,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -593,6 +752,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -612,7 +774,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -621,6 +783,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -640,9 +805,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainer
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -667,7 +836,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -695,7 +864,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -705,6 +874,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -724,7 +896,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -733,11 +905,14 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Options == nil {
-				m.Options = &google_protobuf.Any{}
+				m.Options = &types.Any{}
 			}
 			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -752,9 +927,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainer
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -779,7 +958,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -807,7 +986,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -817,6 +996,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -836,7 +1018,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -846,6 +1028,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -865,7 +1050,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -874,6 +1059,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -894,7 +1082,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -911,7 +1099,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -921,6 +1109,9 @@
 						return ErrInvalidLengthContainer
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthContainer
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -937,7 +1128,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -947,6 +1138,9 @@
 						return ErrInvalidLengthContainer
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthContainer
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -983,7 +1177,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -993,6 +1187,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1007,9 +1204,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainer
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1034,7 +1235,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1062,7 +1263,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1072,6 +1273,9 @@
 				return ErrInvalidLengthContainer
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1086,9 +1290,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainer
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainer
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1152,10 +1360,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthContainer
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthContainer
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1184,6 +1395,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthContainer
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1202,37 +1416,3 @@
 	ErrInvalidLengthContainer = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowContainer   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/events/container.proto", fileDescriptorContainer)
-}
-
-var fileDescriptorContainer = []byte{
-	// 413 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x0a, 0xd3, 0x30,
-	0x18, 0xc7, 0x97, 0x76, 0x6e, 0x98, 0x0a, 0x6a, 0x18, 0x52, 0x7b, 0xa8, 0x73, 0xa7, 0xe9, 0x21,
-	0x85, 0x7a, 0x51, 0x77, 0xd1, 0x6d, 0x0a, 0xa2, 0x82, 0x14, 0x84, 0xe1, 0x45, 0xd2, 0x35, 0xeb,
-	0x82, 0x6d, 0x52, 0xda, 0x74, 0xd0, 0x9b, 0x8f, 0xe2, 0xe3, 0xec, 0xe8, 0xc1, 0x83, 0x27, 0x71,
-	0x05, 0xdf, 0xc0, 0x07, 0x90, 0x26, 0xeb, 0x56, 0x14, 0x95, 0x9d, 0xfa, 0xcf, 0xd7, 0xff, 0x3f,
-	0xdf, 0xf7, 0xfb, 0x08, 0x9c, 0xc5, 0x4c, 0x6e, 0xcb, 0x10, 0xaf, 0x45, 0xea, 0xad, 0x05, 0x97,
-	0x84, 0x71, 0x9a, 0x47, 0x5d, 0x49, 0x32, 0xe6, 0xd1, 0x1d, 0xe5, 0xb2, 0x38, 0x57, 0x71, 0x96,
-	0x0b, 0x29, 0xd0, 0xcd, 0xb3, 0x0d, 0x6b, 0x8b, 0x73, 0x3b, 0x16, 0x22, 0x4e, 0xa8, 0xa7, 0x0c,
-	0x61, 0xb9, 0xf1, 0x08, 0xaf, 0xb4, 0xdb, 0x19, 0xc5, 0x22, 0x16, 0x4a, 0x7a, 0x8d, 0x3a, 0x56,
-	0x9f, 0xfc, 0x77, 0x80, 0xd3, 0x55, 0x59, 0x52, 0xc6, 0x8c, 0x7b, 0x1b, 0x46, 0x93, 0x28, 0x23,
-	0x72, 0xab, 0x6f, 0x98, 0x7c, 0x01, 0xf0, 0xfa, 0xa2, 0xb5, 0x2f, 0x72, 0x4a, 0x24, 0x45, 0xb7,
-	0xa0, 0xc1, 0x22, 0x1b, 0x8c, 0xc1, 0xf4, 0xea, 0x7c, 0x50, 0x7f, 0xbb, 0x63, 0xbc, 0x58, 0x06,
-	0x06, 0x8b, 0xd0, 0x08, 0x5e, 0x61, 0x29, 0x89, 0xa9, 0x6d, 0x34, 0xbf, 0x02, 0x7d, 0x40, 0x4b,
-	0x38, 0xcc, 0x4b, 0x2e, 0x59, 0x4a, 0x6d, 0x73, 0x0c, 0xa6, 0x96, 0x7f, 0x1f, 0xff, 0x41, 0x86,
-	0x7f, 0x6b, 0x81, 0x03, 0x9d, 0x08, 0xda, 0xa8, 0xf3, 0x1a, 0x0e, 0x8f, 0x35, 0x84, 0x60, 0x9f,
-	0x93, 0x94, 0xea, 0x01, 0x02, 0xa5, 0x11, 0x86, 0x43, 0x91, 0x49, 0x26, 0x78, 0xa1, 0x9a, 0x5b,
-	0xfe, 0x08, 0xeb, 0x5d, 0xe1, 0x16, 0x10, 0x3f, 0xe5, 0x55, 0xd0, 0x9a, 0x26, 0x3f, 0xba, 0x58,
-	0x6f, 0xb3, 0xe8, 0x72, 0xac, 0xe7, 0x70, 0x90, 0x90, 0x90, 0x26, 0x85, 0x6d, 0x8e, 0xcd, 0xa9,
-	0xe5, 0xe3, 0x7f, 0x51, 0xe9, 0x0e, 0xf8, 0x95, 0x0a, 0x3c, 0xe3, 0x32, 0xaf, 0x82, 0x63, 0x1a,
-	0xdd, 0x85, 0xd7, 0x0a, 0x4e, 0xb2, 0x62, 0x2b, 0xe4, 0xfb, 0x0f, 0xb4, 0xb2, 0xfb, 0xaa, 0x89,
-	0xd5, 0xd6, 0x5e, 0xd2, 0xca, 0x79, 0x04, 0xad, 0x4e, 0x12, 0xdd, 0x80, 0x66, 0x63, 0xd4, 0xf8,
-	0x8d, 0x6c, 0x26, 0xdc, 0x91, 0xa4, 0x3c, 0x4d, 0xa8, 0x0e, 0x8f, 0x8d, 0x87, 0x60, 0x72, 0xaf,
-	0x83, 0xb9, 0xa4, 0x09, 0xfd, 0x3b, 0xe6, 0xfc, 0xcd, 0xfe, 0xe0, 0xf6, 0xbe, 0x1e, 0xdc, 0xde,
-	0xc7, 0xda, 0x05, 0xfb, 0xda, 0x05, 0x9f, 0x6b, 0x17, 0x7c, 0xaf, 0x5d, 0xf0, 0xe9, 0xa7, 0x0b,
-	0xde, 0xf9, 0x17, 0x3c, 0xe5, 0x99, 0xfe, 0xac, 0xc0, 0xca, 0x08, 0x07, 0x6a, 0xff, 0x0f, 0x7e,
-	0x05, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x09, 0xe0, 0xd6, 0x0b, 0x03, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/events/content.pb.go b/vendor/github.com/containerd/containerd/api/events/content.pb.go
index 87648d1..c6ae8a5 100644
--- a/vendor/github.com/containerd/containerd/api/events/content.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/content.pb.go
@@ -3,37 +3,93 @@
 
 package events
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
-
-import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 type ContentDelete struct {
-	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	Digest               github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
+	XXX_unrecognized     []byte                                     `json:"-"`
+	XXX_sizecache        int32                                      `json:"-"`
 }
 
-func (m *ContentDelete) Reset()                    { *m = ContentDelete{} }
-func (*ContentDelete) ProtoMessage()               {}
-func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
+func (m *ContentDelete) Reset()      { *m = ContentDelete{} }
+func (*ContentDelete) ProtoMessage() {}
+func (*ContentDelete) Descriptor() ([]byte, []int) {
+	return fileDescriptor_dfb34b8b808e2ecd, []int{0}
+}
+func (m *ContentDelete) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContentDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ContentDelete.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ContentDelete) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContentDelete.Merge(m, src)
+}
+func (m *ContentDelete) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContentDelete) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContentDelete.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContentDelete proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*ContentDelete)(nil), "containerd.events.ContentDelete")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/events/content.proto", fileDescriptor_dfb34b8b808e2ecd)
+}
+
+var fileDescriptor_dfb34b8b808e2ecd = []byte{
+	// 228 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0x83, 0x45, 0x53,
+	0xf3, 0x4a, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x04, 0x11, 0x8a, 0xf4, 0x20, 0x0a, 0xa4,
+	0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x94, 0x03, 0x41, 0x3b,
+	0xc0, 0xea, 0x92, 0x4a, 0xd3, 0xf4, 0x0b, 0x72, 0x4a, 0xd3, 0x33, 0xf3, 0xf4, 0xd3, 0x32, 0x53,
+	0x73, 0x52, 0x0a, 0x12, 0x4b, 0x32, 0x20, 0x26, 0x28, 0x45, 0x73, 0xf1, 0x3a, 0x43, 0xec, 0x76,
+	0x49, 0xcd, 0x49, 0x2d, 0x49, 0x15, 0xf2, 0xe2, 0x62, 0x4b, 0xc9, 0x4c, 0x4f, 0x2d, 0x2e, 0x91,
+	0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x32, 0x3a, 0x71, 0x4f, 0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0x2d,
+	0x24, 0xab, 0xf2, 0x0b, 0x52, 0xf3, 0xe0, 0x76, 0x14, 0xeb, 0xa7, 0xe7, 0xeb, 0x42, 0xb4, 0xe8,
+	0xb9, 0x80, 0xa9, 0x20, 0xa8, 0x09, 0x4e, 0x01, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7,
+	0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92,
+	0x63, 0x5c, 0xf0, 0x45, 0x8e, 0x31, 0xca, 0x88, 0x84, 0x00, 0xb2, 0x86, 0x50, 0x11, 0x0c, 0x11,
+	0x8c, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x78, 0x99, 0xee,
+	0x61, 0x01, 0x00, 0x00,
+}
+
 // Field returns the value for the given fieldpath as a string, if defined.
 // If the value is not defined, the second value will be false.
 func (m *ContentDelete) Field(fieldpath []string) (string, bool) {
@@ -68,6 +124,9 @@
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
 		i += copy(dAtA[i:], m.Digest)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -81,12 +140,18 @@
 	return offset + 1
 }
 func (m *ContentDelete) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Digest)
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -109,6 +174,7 @@
 	}
 	s := strings.Join([]string{`&ContentDelete{`,
 		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -136,7 +202,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -164,7 +230,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -174,6 +240,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -188,9 +257,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -254,10 +327,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthContent
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthContent
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -286,6 +362,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthContent
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -304,26 +383,3 @@
 	ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowContent   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/events/content.proto", fileDescriptorContent)
-}
-
-var fileDescriptorContent = []byte{
-	// 228 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0x83, 0x45, 0x53,
-	0xf3, 0x4a, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x04, 0x11, 0x8a, 0xf4, 0x20, 0x0a, 0xa4,
-	0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x94, 0x03, 0x41, 0x3b,
-	0xc0, 0xea, 0x92, 0x4a, 0xd3, 0xf4, 0x0b, 0x72, 0x4a, 0xd3, 0x33, 0xf3, 0xf4, 0xd3, 0x32, 0x53,
-	0x73, 0x52, 0x0a, 0x12, 0x4b, 0x32, 0x20, 0x26, 0x28, 0x45, 0x73, 0xf1, 0x3a, 0x43, 0xec, 0x76,
-	0x49, 0xcd, 0x49, 0x2d, 0x49, 0x15, 0xf2, 0xe2, 0x62, 0x4b, 0xc9, 0x4c, 0x4f, 0x2d, 0x2e, 0x91,
-	0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x32, 0x3a, 0x71, 0x4f, 0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0x2d,
-	0x24, 0xab, 0xf2, 0x0b, 0x52, 0xf3, 0xe0, 0x76, 0x14, 0xeb, 0xa7, 0xe7, 0xeb, 0x42, 0xb4, 0xe8,
-	0xb9, 0x80, 0xa9, 0x20, 0xa8, 0x09, 0x4e, 0x01, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7,
-	0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92,
-	0x63, 0x5c, 0xf0, 0x45, 0x8e, 0x31, 0xca, 0x88, 0x84, 0x00, 0xb2, 0x86, 0x50, 0x11, 0x0c, 0x11,
-	0x8c, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x78, 0x99, 0xee,
-	0x61, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/events/image.pb.go b/vendor/github.com/containerd/containerd/api/events/image.pb.go
index 8197005..e0c58aa 100644
--- a/vendor/github.com/containerd/containerd/api/events/image.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/image.pb.go
@@ -3,55 +3,181 @@
 
 package events
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 type ImageCreate struct {
-	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Name                 string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Labels               map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *ImageCreate) Reset()                    { *m = ImageCreate{} }
-func (*ImageCreate) ProtoMessage()               {}
-func (*ImageCreate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{0} }
+func (m *ImageCreate) Reset()      { *m = ImageCreate{} }
+func (*ImageCreate) ProtoMessage() {}
+func (*ImageCreate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7085610f7b33e042, []int{0}
+}
+func (m *ImageCreate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ImageCreate.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ImageCreate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageCreate.Merge(m, src)
+}
+func (m *ImageCreate) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageCreate) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageCreate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageCreate proto.InternalMessageInfo
 
 type ImageUpdate struct {
-	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Name                 string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Labels               map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *ImageUpdate) Reset()                    { *m = ImageUpdate{} }
-func (*ImageUpdate) ProtoMessage()               {}
-func (*ImageUpdate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{1} }
+func (m *ImageUpdate) Reset()      { *m = ImageUpdate{} }
+func (*ImageUpdate) ProtoMessage() {}
+func (*ImageUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7085610f7b33e042, []int{1}
+}
+func (m *ImageUpdate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ImageUpdate.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ImageUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageUpdate.Merge(m, src)
+}
+func (m *ImageUpdate) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageUpdate proto.InternalMessageInfo
 
 type ImageDelete struct {
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ImageDelete) Reset()                    { *m = ImageDelete{} }
-func (*ImageDelete) ProtoMessage()               {}
-func (*ImageDelete) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{2} }
+func (m *ImageDelete) Reset()      { *m = ImageDelete{} }
+func (*ImageDelete) ProtoMessage() {}
+func (*ImageDelete) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7085610f7b33e042, []int{2}
+}
+func (m *ImageDelete) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ImageDelete.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ImageDelete) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageDelete.Merge(m, src)
+}
+func (m *ImageDelete) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageDelete) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageDelete.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageDelete proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*ImageCreate)(nil), "containerd.services.images.v1.ImageCreate")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.ImageCreate.LabelsEntry")
 	proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.ImageUpdate.LabelsEntry")
 	proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/events/image.proto", fileDescriptor_7085610f7b33e042)
+}
+
+var fileDescriptor_7085610f7b33e042 = []byte{
+	// 292 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4f, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x67, 0xe6,
+	0x26, 0xa6, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xe8, 0x15, 0xa7,
+	0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, 0x81, 0x15, 0x14, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10,
+	0x34, 0x17, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a,
+	0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e,
+	0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53,
+	0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4,
+	0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43,
+	0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25,
+	0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48,
+	0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59,
+	0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1,
+	0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x80, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c,
+	0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f,
+	0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x44, 0x42, 0xc2, 0xb1, 0x86, 0x50, 0x11,
+	0x0c, 0x49, 0x6c, 0xe0, 0xb8, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0x80, 0x92, 0x17,
+	0x77, 0x02, 0x00, 0x00,
+}
+
 // Field returns the value for the given fieldpath as a string, if defined.
 // If the value is not defined, the second value will be false.
 func (m *ImageCreate) Field(fieldpath []string) (string, bool) {
@@ -147,6 +273,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -188,6 +317,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -212,6 +344,9 @@
 		i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
 		i += copy(dAtA[i:], m.Name)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -225,6 +360,9 @@
 	return offset + 1
 }
 func (m *ImageCreate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -239,10 +377,16 @@
 			n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ImageUpdate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -257,16 +401,25 @@
 			n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ImageDelete) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
 	if l > 0 {
 		n += 1 + l + sovImage(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -291,7 +444,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -300,6 +453,7 @@
 	s := strings.Join([]string{`&ImageCreate{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -312,7 +466,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -321,6 +475,7 @@
 	s := strings.Join([]string{`&ImageUpdate{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -331,6 +486,7 @@
 	}
 	s := strings.Join([]string{`&ImageDelete{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -358,7 +514,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -386,7 +542,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -396,6 +552,9 @@
 				return ErrInvalidLengthImage
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthImage
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -415,7 +574,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -424,6 +583,9 @@
 				return ErrInvalidLengthImage
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImage
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -444,7 +606,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -461,7 +623,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -471,6 +633,9 @@
 						return ErrInvalidLengthImage
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthImage
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -487,7 +652,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -497,6 +662,9 @@
 						return ErrInvalidLengthImage
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthImage
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -528,9 +696,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImage
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImage
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -555,7 +727,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -583,7 +755,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -593,6 +765,9 @@
 				return ErrInvalidLengthImage
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthImage
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -612,7 +787,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -621,6 +796,9 @@
 				return ErrInvalidLengthImage
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImage
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -641,7 +819,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -658,7 +836,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -668,6 +846,9 @@
 						return ErrInvalidLengthImage
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthImage
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -684,7 +865,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -694,6 +875,9 @@
 						return ErrInvalidLengthImage
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthImage
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -725,9 +909,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImage
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImage
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -752,7 +940,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -780,7 +968,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -790,6 +978,9 @@
 				return ErrInvalidLengthImage
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthImage
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -804,9 +995,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImage
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImage
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -870,10 +1065,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthImage
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthImage
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -902,6 +1100,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthImage
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -920,30 +1121,3 @@
 	ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowImage   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/events/image.proto", fileDescriptorImage)
-}
-
-var fileDescriptorImage = []byte{
-	// 292 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4f, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x67, 0xe6,
-	0x26, 0xa6, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xe8, 0x15, 0xa7,
-	0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, 0x81, 0x15, 0x14, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10,
-	0x34, 0x17, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a,
-	0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e,
-	0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53,
-	0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4,
-	0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43,
-	0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25,
-	0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48,
-	0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59,
-	0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1,
-	0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x80, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c,
-	0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f,
-	0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x44, 0x42, 0xc2, 0xb1, 0x86, 0x50, 0x11,
-	0x0c, 0x49, 0x6c, 0xe0, 0xb8, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0x80, 0x92, 0x17,
-	0x77, 0x02, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
index 1c81f9f..84882e5 100644
--- a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
@@ -3,56 +3,181 @@
 
 package events
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 type NamespaceCreate struct {
-	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Name                 string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Labels               map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *NamespaceCreate) Reset()                    { *m = NamespaceCreate{} }
-func (*NamespaceCreate) ProtoMessage()               {}
-func (*NamespaceCreate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{0} }
+func (m *NamespaceCreate) Reset()      { *m = NamespaceCreate{} }
+func (*NamespaceCreate) ProtoMessage() {}
+func (*NamespaceCreate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6cd45d1d5adffe29, []int{0}
+}
+func (m *NamespaceCreate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamespaceCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_NamespaceCreate.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *NamespaceCreate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamespaceCreate.Merge(m, src)
+}
+func (m *NamespaceCreate) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamespaceCreate) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamespaceCreate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamespaceCreate proto.InternalMessageInfo
 
 type NamespaceUpdate struct {
-	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Name                 string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Labels               map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *NamespaceUpdate) Reset()                    { *m = NamespaceUpdate{} }
-func (*NamespaceUpdate) ProtoMessage()               {}
-func (*NamespaceUpdate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{1} }
+func (m *NamespaceUpdate) Reset()      { *m = NamespaceUpdate{} }
+func (*NamespaceUpdate) ProtoMessage() {}
+func (*NamespaceUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6cd45d1d5adffe29, []int{1}
+}
+func (m *NamespaceUpdate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamespaceUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_NamespaceUpdate.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *NamespaceUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamespaceUpdate.Merge(m, src)
+}
+func (m *NamespaceUpdate) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamespaceUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamespaceUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamespaceUpdate proto.InternalMessageInfo
 
 type NamespaceDelete struct {
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *NamespaceDelete) Reset()                    { *m = NamespaceDelete{} }
-func (*NamespaceDelete) ProtoMessage()               {}
-func (*NamespaceDelete) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }
+func (m *NamespaceDelete) Reset()      { *m = NamespaceDelete{} }
+func (*NamespaceDelete) ProtoMessage() {}
+func (*NamespaceDelete) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6cd45d1d5adffe29, []int{2}
+}
+func (m *NamespaceDelete) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamespaceDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_NamespaceDelete.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *NamespaceDelete) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamespaceDelete.Merge(m, src)
+}
+func (m *NamespaceDelete) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamespaceDelete) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamespaceDelete.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamespaceDelete proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*NamespaceCreate)(nil), "containerd.events.NamespaceCreate")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.events.NamespaceCreate.LabelsEntry")
 	proto.RegisterType((*NamespaceUpdate)(nil), "containerd.events.NamespaceUpdate")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.events.NamespaceUpdate.LabelsEntry")
 	proto.RegisterType((*NamespaceDelete)(nil), "containerd.events.NamespaceDelete")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/events/namespace.proto", fileDescriptor_6cd45d1d5adffe29)
+}
+
+var fileDescriptor_6cd45d1d5adffe29 = []byte{
+	// 296 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0xe7, 0x25,
+	0xe6, 0xa6, 0x16, 0x17, 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22,
+	0x94, 0xe9, 0x41, 0x94, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x65, 0xf5, 0x41, 0x2c, 0x88,
+	0x42, 0x29, 0x07, 0x82, 0xb6, 0x80, 0xd5, 0x25, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, 0x67,
+	0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0x40, 0x4c, 0x50, 0x5a, 0xc1,
+	0xc8, 0xc5, 0xef, 0x07, 0xb3, 0xde, 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x55, 0x48, 0x88, 0x8b, 0x05,
+	0xe4, 0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0xc8, 0x8d, 0x8b, 0x2d, 0x27,
+	0x31, 0x29, 0x35, 0xa7, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x4f, 0x0f, 0xc3, 0x8d,
+	0x7a, 0x68, 0xe6, 0xe8, 0xf9, 0x80, 0x35, 0xb8, 0xe6, 0x95, 0x14, 0x55, 0x06, 0x41, 0x75, 0x4b,
+	0x59, 0x72, 0x71, 0x23, 0x09, 0x0b, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x6d, 0x02, 0x31,
+	0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15,
+	0x93, 0x05, 0x23, 0xaa, 0x53, 0x43, 0x0b, 0x52, 0xa8, 0xe2, 0x54, 0x88, 0x39, 0xd4, 0x76, 0xaa,
+	0x2a, 0x92, 0x4b, 0x5d, 0x52, 0x73, 0x52, 0xb1, 0xbb, 0xd4, 0x29, 0xe0, 0xc4, 0x43, 0x39, 0x86,
+	0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c,
+	0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, 0x31, 0x46, 0x19, 0x91, 0x90, 0x84, 0xac, 0x21,
+	0x54, 0x04, 0x43, 0x04, 0x63, 0x12, 0x1b, 0x38, 0x66, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
+	0x00, 0x50, 0x87, 0x59, 0x83, 0x02, 0x00, 0x00,
+}
+
 // Field returns the value for the given fieldpath as a string, if defined.
 // If the value is not defined, the second value will be false.
 func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) {
@@ -148,6 +273,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -189,6 +317,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -213,6 +344,9 @@
 		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
 		i += copy(dAtA[i:], m.Name)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -226,6 +360,9 @@
 	return offset + 1
 }
 func (m *NamespaceCreate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -240,10 +377,16 @@
 			n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *NamespaceUpdate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -258,16 +401,25 @@
 			n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *NamespaceDelete) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
 	if l > 0 {
 		n += 1 + l + sovNamespace(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -292,7 +444,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -301,6 +453,7 @@
 	s := strings.Join([]string{`&NamespaceCreate{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -313,7 +466,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -322,6 +475,7 @@
 	s := strings.Join([]string{`&NamespaceUpdate{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -332,6 +486,7 @@
 	}
 	s := strings.Join([]string{`&NamespaceDelete{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -359,7 +514,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -387,7 +542,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -397,6 +552,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -416,7 +574,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -425,6 +583,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -445,7 +606,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -462,7 +623,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -472,6 +633,9 @@
 						return ErrInvalidLengthNamespace
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthNamespace
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -488,7 +652,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -498,6 +662,9 @@
 						return ErrInvalidLengthNamespace
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthNamespace
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -529,9 +696,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -556,7 +727,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -584,7 +755,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -594,6 +765,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -613,7 +787,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -622,6 +796,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -642,7 +819,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -659,7 +836,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -669,6 +846,9 @@
 						return ErrInvalidLengthNamespace
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthNamespace
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -685,7 +865,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -695,6 +875,9 @@
 						return ErrInvalidLengthNamespace
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthNamespace
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -726,9 +909,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -753,7 +940,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -781,7 +968,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -791,6 +978,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -805,9 +995,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -871,10 +1065,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthNamespace
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthNamespace
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -903,6 +1100,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthNamespace
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -921,30 +1121,3 @@
 	ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowNamespace   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/events/namespace.proto", fileDescriptorNamespace)
-}
-
-var fileDescriptorNamespace = []byte{
-	// 296 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0xe7, 0x25,
-	0xe6, 0xa6, 0x16, 0x17, 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22,
-	0x94, 0xe9, 0x41, 0x94, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x65, 0xf5, 0x41, 0x2c, 0x88,
-	0x42, 0x29, 0x07, 0x82, 0xb6, 0x80, 0xd5, 0x25, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, 0x67,
-	0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0x40, 0x4c, 0x50, 0x5a, 0xc1,
-	0xc8, 0xc5, 0xef, 0x07, 0xb3, 0xde, 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x55, 0x48, 0x88, 0x8b, 0x05,
-	0xe4, 0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0xc8, 0x8d, 0x8b, 0x2d, 0x27,
-	0x31, 0x29, 0x35, 0xa7, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x4f, 0x0f, 0xc3, 0x8d,
-	0x7a, 0x68, 0xe6, 0xe8, 0xf9, 0x80, 0x35, 0xb8, 0xe6, 0x95, 0x14, 0x55, 0x06, 0x41, 0x75, 0x4b,
-	0x59, 0x72, 0x71, 0x23, 0x09, 0x0b, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x6d, 0x02, 0x31,
-	0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15,
-	0x93, 0x05, 0x23, 0xaa, 0x53, 0x43, 0x0b, 0x52, 0xa8, 0xe2, 0x54, 0x88, 0x39, 0xd4, 0x76, 0xaa,
-	0x2a, 0x92, 0x4b, 0x5d, 0x52, 0x73, 0x52, 0xb1, 0xbb, 0xd4, 0x29, 0xe0, 0xc4, 0x43, 0x39, 0x86,
-	0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c,
-	0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, 0x31, 0x46, 0x19, 0x91, 0x90, 0x84, 0xac, 0x21,
-	0x54, 0x04, 0x43, 0x04, 0x63, 0x12, 0x1b, 0x38, 0x66, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
-	0x00, 0x50, 0x87, 0x59, 0x83, 0x02, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
index e1f8f5c..0dbdfdb 100644
--- a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
@@ -3,47 +3,144 @@
 
 package events
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 type SnapshotPrepare struct {
-	Key    string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"`
+	Key                  string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Parent               string   `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *SnapshotPrepare) Reset()                    { *m = SnapshotPrepare{} }
-func (*SnapshotPrepare) ProtoMessage()               {}
-func (*SnapshotPrepare) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} }
+func (m *SnapshotPrepare) Reset()      { *m = SnapshotPrepare{} }
+func (*SnapshotPrepare) ProtoMessage() {}
+func (*SnapshotPrepare) Descriptor() ([]byte, []int) {
+	return fileDescriptor_bd6c184d3d9aa5f2, []int{0}
+}
+func (m *SnapshotPrepare) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotPrepare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotPrepare.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotPrepare) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotPrepare.Merge(m, src)
+}
+func (m *SnapshotPrepare) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotPrepare) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotPrepare.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotPrepare proto.InternalMessageInfo
 
 type SnapshotCommit struct {
-	Key  string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	Key                  string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Name                 string   `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *SnapshotCommit) Reset()                    { *m = SnapshotCommit{} }
-func (*SnapshotCommit) ProtoMessage()               {}
-func (*SnapshotCommit) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} }
+func (m *SnapshotCommit) Reset()      { *m = SnapshotCommit{} }
+func (*SnapshotCommit) ProtoMessage() {}
+func (*SnapshotCommit) Descriptor() ([]byte, []int) {
+	return fileDescriptor_bd6c184d3d9aa5f2, []int{1}
+}
+func (m *SnapshotCommit) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotCommit.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotCommit) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotCommit.Merge(m, src)
+}
+func (m *SnapshotCommit) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotCommit) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotCommit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotCommit proto.InternalMessageInfo
 
 type SnapshotRemove struct {
-	Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Key                  string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *SnapshotRemove) Reset()                    { *m = SnapshotRemove{} }
-func (*SnapshotRemove) ProtoMessage()               {}
-func (*SnapshotRemove) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} }
+func (m *SnapshotRemove) Reset()      { *m = SnapshotRemove{} }
+func (*SnapshotRemove) ProtoMessage() {}
+func (*SnapshotRemove) Descriptor() ([]byte, []int) {
+	return fileDescriptor_bd6c184d3d9aa5f2, []int{2}
+}
+func (m *SnapshotRemove) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotRemove) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotRemove.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotRemove) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotRemove.Merge(m, src)
+}
+func (m *SnapshotRemove) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotRemove) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotRemove.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotRemove proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*SnapshotPrepare)(nil), "containerd.events.SnapshotPrepare")
@@ -51,6 +148,29 @@
 	proto.RegisterType((*SnapshotRemove)(nil), "containerd.events.SnapshotRemove")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/events/snapshot.proto", fileDescriptor_bd6c184d3d9aa5f2)
+}
+
+var fileDescriptor_bd6c184d3d9aa5f2 = []byte{
+	// 235 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4a, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x17, 0xe7,
+	0x25, 0x16, 0x14, 0x67, 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, 0x54,
+	0xe9, 0x41, 0x54, 0x48, 0x39, 0x10, 0x34, 0x0e, 0xac, 0x35, 0xa9, 0x34, 0x4d, 0xbf, 0x20, 0xa7,
+	0x34, 0x3d, 0x33, 0x4f, 0x3f, 0x2d, 0x33, 0x35, 0x27, 0xa5, 0x20, 0xb1, 0x24, 0x03, 0x62, 0xa8,
+	0x92, 0x35, 0x17, 0x7f, 0x30, 0xd4, 0x9a, 0x80, 0xa2, 0xd4, 0x82, 0xc4, 0xa2, 0x54, 0x21, 0x01,
+	0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x10, 0x53, 0x48, 0x8c,
+	0x8b, 0x0d, 0x24, 0x93, 0x57, 0x22, 0xc1, 0x04, 0x16, 0x84, 0xf2, 0x94, 0xcc, 0xb8, 0xf8, 0x60,
+	0x9a, 0x9d, 0xf3, 0x73, 0x73, 0x33, 0x4b, 0xb0, 0xe8, 0x15, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d,
+	0x85, 0xea, 0x04, 0xb3, 0x95, 0x94, 0x10, 0xfa, 0x82, 0x52, 0x73, 0xf3, 0xcb, 0xb0, 0xd8, 0xe9,
+	0x14, 0x70, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c,
+	0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x05, 0x5f, 0xe4, 0x18, 0xa3,
+	0x8c, 0x48, 0x08, 0x47, 0x6b, 0x08, 0x15, 0xc1, 0x90, 0xc4, 0x06, 0xf6, 0xb3, 0x31, 0x20, 0x00,
+	0x00, 0xff, 0xff, 0x69, 0x66, 0xa9, 0x2a, 0x86, 0x01, 0x00, 0x00,
+}
+
 // Field returns the value for the given fieldpath as a string, if defined.
 // If the value is not defined, the second value will be false.
 func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) {
@@ -123,6 +243,9 @@
 		i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent)))
 		i += copy(dAtA[i:], m.Parent)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -153,6 +276,9 @@
 		i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name)))
 		i += copy(dAtA[i:], m.Name)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -177,6 +303,9 @@
 		i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
 		i += copy(dAtA[i:], m.Key)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -190,6 +319,9 @@
 	return offset + 1
 }
 func (m *SnapshotPrepare) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Key)
@@ -200,10 +332,16 @@
 	if l > 0 {
 		n += 1 + l + sovSnapshot(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *SnapshotCommit) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Key)
@@ -214,16 +352,25 @@
 	if l > 0 {
 		n += 1 + l + sovSnapshot(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *SnapshotRemove) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Key)
 	if l > 0 {
 		n += 1 + l + sovSnapshot(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -247,6 +394,7 @@
 	s := strings.Join([]string{`&SnapshotPrepare{`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
 		`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -258,6 +406,7 @@
 	s := strings.Join([]string{`&SnapshotCommit{`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -268,6 +417,7 @@
 	}
 	s := strings.Join([]string{`&SnapshotRemove{`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -295,7 +445,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -323,7 +473,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -333,6 +483,9 @@
 				return ErrInvalidLengthSnapshot
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshot
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -352,7 +505,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -362,6 +515,9 @@
 				return ErrInvalidLengthSnapshot
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshot
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -376,9 +532,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshot
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshot
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -403,7 +563,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -431,7 +591,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -441,6 +601,9 @@
 				return ErrInvalidLengthSnapshot
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshot
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -460,7 +623,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -470,6 +633,9 @@
 				return ErrInvalidLengthSnapshot
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshot
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -484,9 +650,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshot
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshot
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -511,7 +681,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -539,7 +709,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -549,6 +719,9 @@
 				return ErrInvalidLengthSnapshot
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshot
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -563,9 +736,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshot
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshot
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -629,10 +806,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthSnapshot
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthSnapshot
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -661,6 +841,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthSnapshot
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -679,26 +862,3 @@
 	ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowSnapshot   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/events/snapshot.proto", fileDescriptorSnapshot)
-}
-
-var fileDescriptorSnapshot = []byte{
-	// 235 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4a, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x17, 0xe7,
-	0x25, 0x16, 0x14, 0x67, 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, 0x54,
-	0xe9, 0x41, 0x54, 0x48, 0x39, 0x10, 0x34, 0x0e, 0xac, 0x35, 0xa9, 0x34, 0x4d, 0xbf, 0x20, 0xa7,
-	0x34, 0x3d, 0x33, 0x4f, 0x3f, 0x2d, 0x33, 0x35, 0x27, 0xa5, 0x20, 0xb1, 0x24, 0x03, 0x62, 0xa8,
-	0x92, 0x35, 0x17, 0x7f, 0x30, 0xd4, 0x9a, 0x80, 0xa2, 0xd4, 0x82, 0xc4, 0xa2, 0x54, 0x21, 0x01,
-	0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x10, 0x53, 0x48, 0x8c,
-	0x8b, 0x0d, 0x24, 0x93, 0x57, 0x22, 0xc1, 0x04, 0x16, 0x84, 0xf2, 0x94, 0xcc, 0xb8, 0xf8, 0x60,
-	0x9a, 0x9d, 0xf3, 0x73, 0x73, 0x33, 0x4b, 0xb0, 0xe8, 0x15, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d,
-	0x85, 0xea, 0x04, 0xb3, 0x95, 0x94, 0x10, 0xfa, 0x82, 0x52, 0x73, 0xf3, 0xcb, 0xb0, 0xd8, 0xe9,
-	0x14, 0x70, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c,
-	0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x05, 0x5f, 0xe4, 0x18, 0xa3,
-	0x8c, 0x48, 0x08, 0x47, 0x6b, 0x08, 0x15, 0xc1, 0x90, 0xc4, 0x06, 0xf6, 0xb3, 0x31, 0x20, 0x00,
-	0x00, 0xff, 0xff, 0x69, 0x66, 0xa9, 0x2a, 0x86, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/events/task.pb.go b/vendor/github.com/containerd/containerd/api/events/task.pb.go
index 64fcfa7..fb653d0 100644
--- a/vendor/github.com/containerd/containerd/api/events/task.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/task.pb.go
@@ -3,24 +3,18 @@
 
 package events
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import _ "github.com/gogo/protobuf/types"
-import containerd_types "github.com/containerd/containerd/api/types"
-
-// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
-
-import time "time"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	types "github.com/containerd/containerd/api/types"
+	proto "github.com/gogo/protobuf/proto"
+	_ "github.com/gogo/protobuf/types"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -28,116 +22,463 @@
 var _ = math.Inf
 var _ = time.Kitchen
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 type TaskCreate struct {
-	ContainerID string                    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	Bundle      string                    `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
-	Rootfs      []*containerd_types.Mount `protobuf:"bytes,3,rep,name=rootfs" json:"rootfs,omitempty"`
-	IO          *TaskIO                   `protobuf:"bytes,4,opt,name=io" json:"io,omitempty"`
-	Checkpoint  string                    `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
-	Pid         uint32                    `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"`
+	ContainerID          string         `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Bundle               string         `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
+	Rootfs               []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"`
+	IO                   *TaskIO        `protobuf:"bytes,4,opt,name=io,proto3" json:"io,omitempty"`
+	Checkpoint           string         `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	Pid                  uint32         `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
 }
 
-func (m *TaskCreate) Reset()                    { *m = TaskCreate{} }
-func (*TaskCreate) ProtoMessage()               {}
-func (*TaskCreate) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
+func (m *TaskCreate) Reset()      { *m = TaskCreate{} }
+func (*TaskCreate) ProtoMessage() {}
+func (*TaskCreate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{0}
+}
+func (m *TaskCreate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskCreate.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskCreate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskCreate.Merge(m, src)
+}
+func (m *TaskCreate) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskCreate) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskCreate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskCreate proto.InternalMessageInfo
 
 type TaskStart struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	Pid         uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Pid                  uint32   `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskStart) Reset()                    { *m = TaskStart{} }
-func (*TaskStart) ProtoMessage()               {}
-func (*TaskStart) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} }
+func (m *TaskStart) Reset()      { *m = TaskStart{} }
+func (*TaskStart) ProtoMessage() {}
+func (*TaskStart) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{1}
+}
+func (m *TaskStart) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskStart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskStart.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskStart) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskStart.Merge(m, src)
+}
+func (m *TaskStart) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskStart) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskStart.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskStart proto.InternalMessageInfo
 
 type TaskDelete struct {
 	ContainerID string    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
 	Pid         uint32    `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
 	ExitStatus  uint32    `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
-	ExitedAt    time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+	ExitedAt    time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
 	// id is the specific exec. By default if omitted will be `""` thus matches
 	// the init exec of the task matching `container_id`.
-	ID string `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskDelete) Reset()                    { *m = TaskDelete{} }
-func (*TaskDelete) ProtoMessage()               {}
-func (*TaskDelete) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{2} }
+func (m *TaskDelete) Reset()      { *m = TaskDelete{} }
+func (*TaskDelete) ProtoMessage() {}
+func (*TaskDelete) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{2}
+}
+func (m *TaskDelete) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskDelete.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskDelete) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskDelete.Merge(m, src)
+}
+func (m *TaskDelete) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskDelete) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskDelete.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskDelete proto.InternalMessageInfo
 
 type TaskIO struct {
-	Stdin    string `protobuf:"bytes,1,opt,name=stdin,proto3" json:"stdin,omitempty"`
-	Stdout   string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"`
-	Stderr   string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"`
-	Terminal bool   `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	Stdin                string   `protobuf:"bytes,1,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout               string   `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr               string   `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal             bool     `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskIO) Reset()                    { *m = TaskIO{} }
-func (*TaskIO) ProtoMessage()               {}
-func (*TaskIO) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{3} }
+func (m *TaskIO) Reset()      { *m = TaskIO{} }
+func (*TaskIO) ProtoMessage() {}
+func (*TaskIO) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{3}
+}
+func (m *TaskIO) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskIO) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskIO.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskIO) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskIO.Merge(m, src)
+}
+func (m *TaskIO) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskIO) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskIO.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskIO proto.InternalMessageInfo
 
 type TaskExit struct {
-	ContainerID string    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ID          string    `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
-	Pid         uint32    `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
-	ExitStatus  uint32    `protobuf:"varint,4,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
-	ExitedAt    time.Time `protobuf:"bytes,5,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+	ContainerID          string    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ID                   string    `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+	Pid                  uint32    `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+	ExitStatus           uint32    `protobuf:"varint,4,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt             time.Time `protobuf:"bytes,5,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *TaskExit) Reset()                    { *m = TaskExit{} }
-func (*TaskExit) ProtoMessage()               {}
-func (*TaskExit) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{4} }
+func (m *TaskExit) Reset()      { *m = TaskExit{} }
+func (*TaskExit) ProtoMessage() {}
+func (*TaskExit) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{4}
+}
+func (m *TaskExit) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskExit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskExit.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskExit) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskExit.Merge(m, src)
+}
+func (m *TaskExit) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskExit) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskExit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskExit proto.InternalMessageInfo
 
 type TaskOOM struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskOOM) Reset()                    { *m = TaskOOM{} }
-func (*TaskOOM) ProtoMessage()               {}
-func (*TaskOOM) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{5} }
+func (m *TaskOOM) Reset()      { *m = TaskOOM{} }
+func (*TaskOOM) ProtoMessage() {}
+func (*TaskOOM) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{5}
+}
+func (m *TaskOOM) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskOOM) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskOOM.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskOOM) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskOOM.Merge(m, src)
+}
+func (m *TaskOOM) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskOOM) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskOOM.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskOOM proto.InternalMessageInfo
 
 type TaskExecAdded struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskExecAdded) Reset()                    { *m = TaskExecAdded{} }
-func (*TaskExecAdded) ProtoMessage()               {}
-func (*TaskExecAdded) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{6} }
+func (m *TaskExecAdded) Reset()      { *m = TaskExecAdded{} }
+func (*TaskExecAdded) ProtoMessage() {}
+func (*TaskExecAdded) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{6}
+}
+func (m *TaskExecAdded) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskExecAdded) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskExecAdded.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskExecAdded) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskExecAdded.Merge(m, src)
+}
+func (m *TaskExecAdded) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskExecAdded) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskExecAdded.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskExecAdded proto.InternalMessageInfo
 
 type TaskExecStarted struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
-	Pid         uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	Pid                  uint32   `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskExecStarted) Reset()                    { *m = TaskExecStarted{} }
-func (*TaskExecStarted) ProtoMessage()               {}
-func (*TaskExecStarted) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{7} }
+func (m *TaskExecStarted) Reset()      { *m = TaskExecStarted{} }
+func (*TaskExecStarted) ProtoMessage() {}
+func (*TaskExecStarted) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{7}
+}
+func (m *TaskExecStarted) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskExecStarted) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskExecStarted.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskExecStarted) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskExecStarted.Merge(m, src)
+}
+func (m *TaskExecStarted) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskExecStarted) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskExecStarted.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskExecStarted proto.InternalMessageInfo
 
 type TaskPaused struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskPaused) Reset()                    { *m = TaskPaused{} }
-func (*TaskPaused) ProtoMessage()               {}
-func (*TaskPaused) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{8} }
+func (m *TaskPaused) Reset()      { *m = TaskPaused{} }
+func (*TaskPaused) ProtoMessage() {}
+func (*TaskPaused) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{8}
+}
+func (m *TaskPaused) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskPaused) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskPaused.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskPaused) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskPaused.Merge(m, src)
+}
+func (m *TaskPaused) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskPaused) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskPaused.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskPaused proto.InternalMessageInfo
 
 type TaskResumed struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskResumed) Reset()                    { *m = TaskResumed{} }
-func (*TaskResumed) ProtoMessage()               {}
-func (*TaskResumed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{9} }
+func (m *TaskResumed) Reset()      { *m = TaskResumed{} }
+func (*TaskResumed) ProtoMessage() {}
+func (*TaskResumed) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{9}
+}
+func (m *TaskResumed) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskResumed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskResumed.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskResumed) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskResumed.Merge(m, src)
+}
+func (m *TaskResumed) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskResumed) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskResumed.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskResumed proto.InternalMessageInfo
 
 type TaskCheckpointed struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	Checkpoint  string `protobuf:"bytes,2,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Checkpoint           string   `protobuf:"bytes,2,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *TaskCheckpointed) Reset()                    { *m = TaskCheckpointed{} }
-func (*TaskCheckpointed) ProtoMessage()               {}
-func (*TaskCheckpointed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{10} }
+func (m *TaskCheckpointed) Reset()      { *m = TaskCheckpointed{} }
+func (*TaskCheckpointed) ProtoMessage() {}
+func (*TaskCheckpointed) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8db0813f7adfb63c, []int{10}
+}
+func (m *TaskCheckpointed) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TaskCheckpointed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TaskCheckpointed.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TaskCheckpointed) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TaskCheckpointed.Merge(m, src)
+}
+func (m *TaskCheckpointed) XXX_Size() int {
+	return m.Size()
+}
+func (m *TaskCheckpointed) XXX_DiscardUnknown() {
+	xxx_messageInfo_TaskCheckpointed.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TaskCheckpointed proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*TaskCreate)(nil), "containerd.events.TaskCreate")
@@ -153,6 +494,55 @@
 	proto.RegisterType((*TaskCheckpointed)(nil), "containerd.events.TaskCheckpointed")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/events/task.proto", fileDescriptor_8db0813f7adfb63c)
+}
+
+var fileDescriptor_8db0813f7adfb63c = []byte{
+	// 644 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40,
+	0x10, 0xc7, 0x63, 0xa7, 0x75, 0xd3, 0x09, 0x55, 0x8b, 0x55, 0x95, 0x90, 0x83, 0x1d, 0x99, 0x4b,
+	0x4e, 0xb6, 0x08, 0x12, 0x17, 0x84, 0xd4, 0xa4, 0xe1, 0x90, 0x43, 0x95, 0xe2, 0xf6, 0x50, 0x71,
+	0x89, 0x36, 0xd9, 0x4d, 0xb2, 0x34, 0xf1, 0x5a, 0xf6, 0x18, 0x15, 0x89, 0x03, 0x8f, 0xc0, 0x23,
+	0xf0, 0x38, 0x3d, 0x20, 0xc4, 0x91, 0x53, 0xa0, 0x7e, 0x00, 0x4e, 0x3c, 0x00, 0x5a, 0xaf, 0x93,
+	0xb6, 0x54, 0x7c, 0x59, 0xe2, 0x94, 0x9d, 0xd9, 0xd9, 0xff, 0xec, 0xfc, 0x76, 0x3c, 0x81, 0xc7,
+	0x13, 0x8e, 0xd3, 0x64, 0xe8, 0x8e, 0xc4, 0xdc, 0x1b, 0x89, 0x00, 0x09, 0x0f, 0x58, 0x44, 0xaf,
+	0x2f, 0x49, 0xc8, 0x3d, 0xf6, 0x8a, 0x05, 0x18, 0x7b, 0x48, 0xe2, 0x33, 0x37, 0x8c, 0x04, 0x0a,
+	0xf3, 0xee, 0x55, 0x84, 0xab, 0x76, 0xeb, 0xbb, 0x13, 0x31, 0x11, 0xd9, 0xae, 0x27, 0x57, 0x2a,
+	0xb0, 0x6e, 0x4f, 0x84, 0x98, 0xcc, 0x98, 0x97, 0x59, 0xc3, 0x64, 0xec, 0x21, 0x9f, 0xb3, 0x18,
+	0xc9, 0x3c, 0xcc, 0x03, 0xfe, 0xee, 0x06, 0xf8, 0x3a, 0x64, 0xb1, 0x37, 0x17, 0x49, 0x80, 0xf9,
+	0xb9, 0xfd, 0x3f, 0x9e, 0x5b, 0xa5, 0x0c, 0x67, 0xc9, 0x84, 0x07, 0xde, 0x98, 0xb3, 0x19, 0x0d,
+	0x09, 0x4e, 0x95, 0x82, 0xf3, 0x4d, 0x03, 0x38, 0x21, 0xf1, 0xd9, 0x41, 0xc4, 0x08, 0x32, 0xb3,
+	0x05, 0x77, 0x56, 0x87, 0x07, 0x9c, 0xd6, 0xb4, 0x86, 0xd6, 0xdc, 0xec, 0x6c, 0xa7, 0x0b, 0xbb,
+	0x7a, 0xb0, 0xf4, 0xf7, 0xba, 0x7e, 0x75, 0x15, 0xd4, 0xa3, 0xe6, 0x1e, 0x18, 0xc3, 0x24, 0xa0,
+	0x33, 0x56, 0xd3, 0x65, 0xb4, 0x9f, 0x5b, 0xa6, 0x07, 0x46, 0x24, 0x04, 0x8e, 0xe3, 0x5a, 0xb9,
+	0x51, 0x6e, 0x56, 0x5b, 0xf7, 0xdc, 0x6b, 0xbc, 0xb2, 0x5a, 0xdc, 0x43, 0x59, 0x8b, 0x9f, 0x87,
+	0x99, 0x0f, 0x41, 0xe7, 0xa2, 0xb6, 0xd6, 0xd0, 0x9a, 0xd5, 0xd6, 0x7d, 0xf7, 0x16, 0x5c, 0x57,
+	0xde, 0xb3, 0xd7, 0xef, 0x18, 0xe9, 0xc2, 0xd6, 0x7b, 0x7d, 0x5f, 0xe7, 0xc2, 0xb4, 0x00, 0x46,
+	0x53, 0x36, 0x3a, 0x0b, 0x05, 0x0f, 0xb0, 0xb6, 0x9e, 0xe5, 0xbf, 0xe6, 0x31, 0x77, 0xa0, 0x1c,
+	0x72, 0x5a, 0x33, 0x1a, 0x5a, 0x73, 0xcb, 0x97, 0x4b, 0xe7, 0x39, 0x6c, 0x4a, 0x9d, 0x63, 0x24,
+	0x11, 0x16, 0x2a, 0x37, 0x97, 0xd4, 0xaf, 0x24, 0x3f, 0xe6, 0x0c, 0xbb, 0x6c, 0xc6, 0x0a, 0x32,
+	0xbc, 0x25, 0x6a, 0xda, 0x50, 0x65, 0xe7, 0x1c, 0x07, 0x31, 0x12, 0x4c, 0x24, 0x42, 0xb9, 0x03,
+	0xd2, 0x75, 0x9c, 0x79, 0xcc, 0x36, 0x6c, 0x4a, 0x8b, 0xd1, 0x01, 0xc1, 0x1c, 0x5a, 0xdd, 0x55,
+	0x8d, 0xe6, 0x2e, 0x5f, 0xdd, 0x3d, 0x59, 0x36, 0x5a, 0xa7, 0x72, 0xb1, 0xb0, 0x4b, 0xef, 0xbe,
+	0xd8, 0x9a, 0x5f, 0x51, 0xc7, 0xda, 0x68, 0xee, 0x81, 0xce, 0xa9, 0xa2, 0x96, 0x53, 0xed, 0xfa,
+	0x3a, 0xa7, 0xce, 0x4b, 0x30, 0x14, 0x6b, 0x73, 0x17, 0xd6, 0x63, 0xa4, 0x3c, 0x50, 0x45, 0xf8,
+	0xca, 0x90, 0x2f, 0x1e, 0x23, 0x15, 0x09, 0x2e, 0x5f, 0x5c, 0x59, 0xb9, 0x9f, 0x45, 0x51, 0x76,
+	0x5d, 0xe5, 0x67, 0x51, 0x64, 0xd6, 0xa1, 0x82, 0x2c, 0x9a, 0xf3, 0x80, 0xcc, 0xb2, 0x9b, 0x56,
+	0xfc, 0x95, 0xed, 0x7c, 0xd0, 0xa0, 0x22, 0x93, 0x3d, 0x3b, 0xe7, 0x58, 0xb0, 0xfd, 0xf4, 0x9c,
+	0xdc, 0x8d, 0x22, 0x96, 0x48, 0xcb, 0xbf, 0x44, 0xba, 0xf6, 0x7b, 0xa4, 0xeb, 0x45, 0x90, 0x3a,
+	0x4f, 0x61, 0x43, 0x56, 0xd3, 0xef, 0x1f, 0x16, 0x29, 0xc6, 0x99, 0xc2, 0x96, 0x82, 0xc1, 0x46,
+	0x6d, 0x4a, 0x19, 0x2d, 0x44, 0xe4, 0x01, 0x6c, 0xb0, 0x73, 0x36, 0x1a, 0xac, 0xb0, 0x40, 0xba,
+	0xb0, 0x0d, 0xa9, 0xd9, 0xeb, 0xfa, 0x86, 0xdc, 0xea, 0x51, 0xe7, 0x0d, 0x6c, 0x2f, 0x33, 0x65,
+	0xdf, 0xc2, 0x7f, 0xcc, 0x75, 0xfb, 0x29, 0x9c, 0x7d, 0xf5, 0xc5, 0x1c, 0x91, 0x24, 0x2e, 0x96,
+	0xd8, 0x69, 0x43, 0x55, 0x2a, 0xf8, 0x2c, 0x4e, 0xe6, 0x05, 0x25, 0xc6, 0xb0, 0x93, 0x8d, 0xbe,
+	0xd5, 0xb8, 0x28, 0xc8, 0xe0, 0xe6, 0x10, 0xd2, 0x7f, 0x1e, 0x42, 0x9d, 0xa3, 0x8b, 0x4b, 0xab,
+	0xf4, 0xf9, 0xd2, 0x2a, 0xbd, 0x4d, 0x2d, 0xed, 0x22, 0xb5, 0xb4, 0x4f, 0xa9, 0xa5, 0x7d, 0x4d,
+	0x2d, 0xed, 0xfd, 0x77, 0x4b, 0x7b, 0xd1, 0xfa, 0x87, 0x7f, 0x9f, 0x27, 0xea, 0xe7, 0xb4, 0x74,
+	0x5a, 0x1e, 0x1a, 0x59, 0x47, 0x3e, 0xfa, 0x11, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x58, 0x0f, 0xec,
+	0xbe, 0x06, 0x00, 0x00,
+}
+
 // Field returns the value for the given fieldpath as a string, if defined.
 // If the value is not defined, the second value will be false.
 func (m *TaskCreate) Field(fieldpath []string) (string, bool) {
@@ -408,6 +798,9 @@
 		i++
 		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -437,6 +830,9 @@
 		i++
 		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -473,8 +869,8 @@
 	}
 	dAtA[i] = 0x22
 	i++
-	i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
-	n2, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -485,6 +881,9 @@
 		i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -531,6 +930,9 @@
 		}
 		i++
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -573,12 +975,15 @@
 	}
 	dAtA[i] = 0x2a
 	i++
-	i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
-	n3, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n3
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -603,6 +1008,9 @@
 		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
 		i += copy(dAtA[i:], m.ContainerID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -633,6 +1041,9 @@
 		i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID)))
 		i += copy(dAtA[i:], m.ExecID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -668,6 +1079,9 @@
 		i++
 		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -692,6 +1106,9 @@
 		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
 		i += copy(dAtA[i:], m.ContainerID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -716,6 +1133,9 @@
 		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
 		i += copy(dAtA[i:], m.ContainerID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -746,6 +1166,9 @@
 		i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint)))
 		i += copy(dAtA[i:], m.Checkpoint)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -759,6 +1182,9 @@
 	return offset + 1
 }
 func (m *TaskCreate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -786,10 +1212,16 @@
 	if m.Pid != 0 {
 		n += 1 + sovTask(uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskStart) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -799,10 +1231,16 @@
 	if m.Pid != 0 {
 		n += 1 + sovTask(uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskDelete) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -815,16 +1253,22 @@
 	if m.ExitStatus != 0 {
 		n += 1 + sovTask(uint64(m.ExitStatus))
 	}
-	l = types.SizeOfStdTime(m.ExitedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovTask(uint64(l))
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovTask(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskIO) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Stdin)
@@ -842,10 +1286,16 @@
 	if m.Terminal {
 		n += 2
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskExit) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -862,22 +1312,34 @@
 	if m.ExitStatus != 0 {
 		n += 1 + sovTask(uint64(m.ExitStatus))
 	}
-	l = types.SizeOfStdTime(m.ExitedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovTask(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskOOM) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
 	if l > 0 {
 		n += 1 + l + sovTask(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskExecAdded) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -888,10 +1350,16 @@
 	if l > 0 {
 		n += 1 + l + sovTask(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskExecStarted) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -905,30 +1373,48 @@
 	if m.Pid != 0 {
 		n += 1 + sovTask(uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskPaused) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
 	if l > 0 {
 		n += 1 + l + sovTask(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskResumed) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
 	if l > 0 {
 		n += 1 + l + sovTask(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *TaskCheckpointed) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -939,6 +1425,9 @@
 	if l > 0 {
 		n += 1 + l + sovTask(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -962,10 +1451,11 @@
 	s := strings.Join([]string{`&TaskCreate{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`,
-		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "types.Mount", 1) + `,`,
 		`IO:` + strings.Replace(fmt.Sprintf("%v", this.IO), "TaskIO", "TaskIO", 1) + `,`,
 		`Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -977,6 +1467,7 @@
 	s := strings.Join([]string{`&TaskStart{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -989,8 +1480,9 @@
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1004,6 +1496,7 @@
 		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
 		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
 		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1017,7 +1510,8 @@
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1028,6 +1522,7 @@
 	}
 	s := strings.Join([]string{`&TaskOOM{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1039,6 +1534,7 @@
 	s := strings.Join([]string{`&TaskExecAdded{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1051,6 +1547,7 @@
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1061,6 +1558,7 @@
 	}
 	s := strings.Join([]string{`&TaskPaused{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1071,6 +1569,7 @@
 	}
 	s := strings.Join([]string{`&TaskResumed{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1082,6 +1581,7 @@
 	s := strings.Join([]string{`&TaskCheckpointed{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1109,7 +1609,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1137,7 +1637,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1147,6 +1647,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1166,7 +1669,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1176,6 +1679,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1195,7 +1701,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1204,10 +1710,13 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Rootfs = append(m.Rootfs, &containerd_types.Mount{})
+			m.Rootfs = append(m.Rootfs, &types.Mount{})
 			if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -1226,7 +1735,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1235,6 +1744,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1259,7 +1771,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1269,6 +1781,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1288,7 +1803,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1302,9 +1817,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1329,7 +1848,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1357,7 +1876,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1367,6 +1886,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1386,7 +1908,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1400,9 +1922,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1427,7 +1953,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1455,7 +1981,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1465,6 +1991,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1484,7 +2013,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1503,7 +2032,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				m.ExitStatus |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1522,7 +2051,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1531,10 +2060,13 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -1552,7 +2084,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1562,6 +2094,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1576,9 +2111,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1603,7 +2142,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1631,7 +2170,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1641,6 +2180,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1660,7 +2202,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1670,6 +2212,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1689,7 +2234,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1699,6 +2244,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1718,7 +2266,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1733,9 +2281,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1760,7 +2312,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1788,7 +2340,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1798,6 +2350,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1817,7 +2372,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1827,6 +2382,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1846,7 +2404,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1865,7 +2423,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				m.ExitStatus |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1884,7 +2442,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1893,10 +2451,13 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -1909,9 +2470,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1936,7 +2501,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1964,7 +2529,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1974,6 +2539,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1988,9 +2556,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2015,7 +2587,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2043,7 +2615,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2053,6 +2625,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2072,7 +2647,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2082,6 +2657,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2096,9 +2674,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2123,7 +2705,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2151,7 +2733,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2161,6 +2743,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2180,7 +2765,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2190,6 +2775,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2209,7 +2797,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2223,9 +2811,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2250,7 +2842,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2278,7 +2870,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2288,6 +2880,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2302,9 +2897,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2329,7 +2928,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2357,7 +2956,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2367,6 +2966,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2381,9 +2983,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2408,7 +3014,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2436,7 +3042,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2446,6 +3052,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2465,7 +3074,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2475,6 +3084,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2489,9 +3101,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2555,10 +3171,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthTask
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthTask
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -2587,6 +3206,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthTask
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -2605,52 +3227,3 @@
 	ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowTask   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/events/task.proto", fileDescriptorTask)
-}
-
-var fileDescriptorTask = []byte{
-	// 644 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40,
-	0x10, 0xc7, 0x63, 0xa7, 0x75, 0xd3, 0x09, 0x55, 0x8b, 0x55, 0x95, 0x90, 0x83, 0x1d, 0x99, 0x4b,
-	0x4e, 0xb6, 0x08, 0x12, 0x17, 0x84, 0xd4, 0xa4, 0xe1, 0x90, 0x43, 0x95, 0xe2, 0xf6, 0x50, 0x71,
-	0x89, 0x36, 0xd9, 0x4d, 0xb2, 0x34, 0xf1, 0x5a, 0xf6, 0x18, 0x15, 0x89, 0x03, 0x8f, 0xc0, 0x23,
-	0xf0, 0x38, 0x3d, 0x20, 0xc4, 0x91, 0x53, 0xa0, 0x7e, 0x00, 0x4e, 0x3c, 0x00, 0x5a, 0xaf, 0x93,
-	0xb6, 0x54, 0x7c, 0x59, 0xe2, 0x94, 0x9d, 0xd9, 0xd9, 0xff, 0xec, 0xfc, 0x76, 0x3c, 0x81, 0xc7,
-	0x13, 0x8e, 0xd3, 0x64, 0xe8, 0x8e, 0xc4, 0xdc, 0x1b, 0x89, 0x00, 0x09, 0x0f, 0x58, 0x44, 0xaf,
-	0x2f, 0x49, 0xc8, 0x3d, 0xf6, 0x8a, 0x05, 0x18, 0x7b, 0x48, 0xe2, 0x33, 0x37, 0x8c, 0x04, 0x0a,
-	0xf3, 0xee, 0x55, 0x84, 0xab, 0x76, 0xeb, 0xbb, 0x13, 0x31, 0x11, 0xd9, 0xae, 0x27, 0x57, 0x2a,
-	0xb0, 0x6e, 0x4f, 0x84, 0x98, 0xcc, 0x98, 0x97, 0x59, 0xc3, 0x64, 0xec, 0x21, 0x9f, 0xb3, 0x18,
-	0xc9, 0x3c, 0xcc, 0x03, 0xfe, 0xee, 0x06, 0xf8, 0x3a, 0x64, 0xb1, 0x37, 0x17, 0x49, 0x80, 0xf9,
-	0xb9, 0xfd, 0x3f, 0x9e, 0x5b, 0xa5, 0x0c, 0x67, 0xc9, 0x84, 0x07, 0xde, 0x98, 0xb3, 0x19, 0x0d,
-	0x09, 0x4e, 0x95, 0x82, 0xf3, 0x4d, 0x03, 0x38, 0x21, 0xf1, 0xd9, 0x41, 0xc4, 0x08, 0x32, 0xb3,
-	0x05, 0x77, 0x56, 0x87, 0x07, 0x9c, 0xd6, 0xb4, 0x86, 0xd6, 0xdc, 0xec, 0x6c, 0xa7, 0x0b, 0xbb,
-	0x7a, 0xb0, 0xf4, 0xf7, 0xba, 0x7e, 0x75, 0x15, 0xd4, 0xa3, 0xe6, 0x1e, 0x18, 0xc3, 0x24, 0xa0,
-	0x33, 0x56, 0xd3, 0x65, 0xb4, 0x9f, 0x5b, 0xa6, 0x07, 0x46, 0x24, 0x04, 0x8e, 0xe3, 0x5a, 0xb9,
-	0x51, 0x6e, 0x56, 0x5b, 0xf7, 0xdc, 0x6b, 0xbc, 0xb2, 0x5a, 0xdc, 0x43, 0x59, 0x8b, 0x9f, 0x87,
-	0x99, 0x0f, 0x41, 0xe7, 0xa2, 0xb6, 0xd6, 0xd0, 0x9a, 0xd5, 0xd6, 0x7d, 0xf7, 0x16, 0x5c, 0x57,
-	0xde, 0xb3, 0xd7, 0xef, 0x18, 0xe9, 0xc2, 0xd6, 0x7b, 0x7d, 0x5f, 0xe7, 0xc2, 0xb4, 0x00, 0x46,
-	0x53, 0x36, 0x3a, 0x0b, 0x05, 0x0f, 0xb0, 0xb6, 0x9e, 0xe5, 0xbf, 0xe6, 0x31, 0x77, 0xa0, 0x1c,
-	0x72, 0x5a, 0x33, 0x1a, 0x5a, 0x73, 0xcb, 0x97, 0x4b, 0xe7, 0x39, 0x6c, 0x4a, 0x9d, 0x63, 0x24,
-	0x11, 0x16, 0x2a, 0x37, 0x97, 0xd4, 0xaf, 0x24, 0x3f, 0xe6, 0x0c, 0xbb, 0x6c, 0xc6, 0x0a, 0x32,
-	0xbc, 0x25, 0x6a, 0xda, 0x50, 0x65, 0xe7, 0x1c, 0x07, 0x31, 0x12, 0x4c, 0x24, 0x42, 0xb9, 0x03,
-	0xd2, 0x75, 0x9c, 0x79, 0xcc, 0x36, 0x6c, 0x4a, 0x8b, 0xd1, 0x01, 0xc1, 0x1c, 0x5a, 0xdd, 0x55,
-	0x8d, 0xe6, 0x2e, 0x5f, 0xdd, 0x3d, 0x59, 0x36, 0x5a, 0xa7, 0x72, 0xb1, 0xb0, 0x4b, 0xef, 0xbe,
-	0xd8, 0x9a, 0x5f, 0x51, 0xc7, 0xda, 0x68, 0xee, 0x81, 0xce, 0xa9, 0xa2, 0x96, 0x53, 0xed, 0xfa,
-	0x3a, 0xa7, 0xce, 0x4b, 0x30, 0x14, 0x6b, 0x73, 0x17, 0xd6, 0x63, 0xa4, 0x3c, 0x50, 0x45, 0xf8,
-	0xca, 0x90, 0x2f, 0x1e, 0x23, 0x15, 0x09, 0x2e, 0x5f, 0x5c, 0x59, 0xb9, 0x9f, 0x45, 0x51, 0x76,
-	0x5d, 0xe5, 0x67, 0x51, 0x64, 0xd6, 0xa1, 0x82, 0x2c, 0x9a, 0xf3, 0x80, 0xcc, 0xb2, 0x9b, 0x56,
-	0xfc, 0x95, 0xed, 0x7c, 0xd0, 0xa0, 0x22, 0x93, 0x3d, 0x3b, 0xe7, 0x58, 0xb0, 0xfd, 0xf4, 0x9c,
-	0xdc, 0x8d, 0x22, 0x96, 0x48, 0xcb, 0xbf, 0x44, 0xba, 0xf6, 0x7b, 0xa4, 0xeb, 0x45, 0x90, 0x3a,
-	0x4f, 0x61, 0x43, 0x56, 0xd3, 0xef, 0x1f, 0x16, 0x29, 0xc6, 0x99, 0xc2, 0x96, 0x82, 0xc1, 0x46,
-	0x6d, 0x4a, 0x19, 0x2d, 0x44, 0xe4, 0x01, 0x6c, 0xb0, 0x73, 0x36, 0x1a, 0xac, 0xb0, 0x40, 0xba,
-	0xb0, 0x0d, 0xa9, 0xd9, 0xeb, 0xfa, 0x86, 0xdc, 0xea, 0x51, 0xe7, 0x0d, 0x6c, 0x2f, 0x33, 0x65,
-	0xdf, 0xc2, 0x7f, 0xcc, 0x75, 0xfb, 0x29, 0x9c, 0x7d, 0xf5, 0xc5, 0x1c, 0x91, 0x24, 0x2e, 0x96,
-	0xd8, 0x69, 0x43, 0x55, 0x2a, 0xf8, 0x2c, 0x4e, 0xe6, 0x05, 0x25, 0xc6, 0xb0, 0x93, 0x8d, 0xbe,
-	0xd5, 0xb8, 0x28, 0xc8, 0xe0, 0xe6, 0x10, 0xd2, 0x7f, 0x1e, 0x42, 0x9d, 0xa3, 0x8b, 0x4b, 0xab,
-	0xf4, 0xf9, 0xd2, 0x2a, 0xbd, 0x4d, 0x2d, 0xed, 0x22, 0xb5, 0xb4, 0x4f, 0xa9, 0xa5, 0x7d, 0x4d,
-	0x2d, 0xed, 0xfd, 0x77, 0x4b, 0x7b, 0xd1, 0xfa, 0x87, 0x7f, 0x9f, 0x27, 0xea, 0xe7, 0xb4, 0x74,
-	0x5a, 0x1e, 0x1a, 0x59, 0x47, 0x3e, 0xfa, 0x11, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x58, 0x0f, 0xec,
-	0xbe, 0x06, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
index 4d93221..ce3cef0 100644
--- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
@@ -1,49 +1,22 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/containers/v1/containers.proto
 
-/*
-	Package containers is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/containers/v1/containers.proto
-
-	It has these top-level messages:
-		Container
-		GetContainerRequest
-		GetContainerResponse
-		ListContainersRequest
-		ListContainersResponse
-		CreateContainerRequest
-		CreateContainerResponse
-		UpdateContainerRequest
-		UpdateContainerResponse
-		DeleteContainerRequest
-		ListContainerMessage
-*/
 package containers
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import google_protobuf2 "github.com/gogo/protobuf/types"
-import google_protobuf3 "github.com/gogo/protobuf/types"
-import _ "github.com/gogo/protobuf/types"
-
-import time "time"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types "github.com/gogo/protobuf/types"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -68,16 +41,16 @@
 	//
 	// Note that to add a new value to this field, read the existing set and
 	// include the entire result in the update call.
-	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	// Image contains the reference of the image used to build the
 	// specification and snapshots for running this container.
 	//
 	// If this field is updated, the spec and rootfs needed to updated, as well.
 	Image string `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"`
 	// Runtime specifies which runtime to use for executing this container.
-	Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime" json:"runtime,omitempty"`
+	Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime,proto3" json:"runtime,omitempty"`
 	// Spec to be used when creating the container. This is runtime specific.
-	Spec *google_protobuf1.Any `protobuf:"bytes,5,opt,name=spec" json:"spec,omitempty"`
+	Spec *types.Any `protobuf:"bytes,5,opt,name=spec,proto3" json:"spec,omitempty"`
 	// Snapshotter specifies the snapshotter name used for rootfs
 	Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
 	// SnapshotKey specifies the snapshot key to use for the container's root
@@ -92,9 +65,9 @@
 	// This field may be updated.
 	SnapshotKey string `protobuf:"bytes,7,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"`
 	// CreatedAt is the time the container was first created.
-	CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"`
 	// UpdatedAt is the last time the container was mutated.
-	UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"`
 	// Extensions allow clients to provide zero or more blobs that are directly
 	// associated with the container. One may provide protobuf, json, or other
 	// encoding formats. The primary use of this is to further decorate the
@@ -104,39 +77,163 @@
 	// that should be unique against other extensions. When updating extension
 	// data, one should only update the specified extension using field paths
 	// to select a specific map key.
-	Extensions map[string]google_protobuf1.Any `protobuf:"bytes,10,rep,name=extensions" json:"extensions" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"`
+	Extensions           map[string]types.Any `protobuf:"bytes,10,rep,name=extensions,proto3" json:"extensions" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
 }
 
-func (m *Container) Reset()                    { *m = Container{} }
-func (*Container) ProtoMessage()               {}
-func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0} }
+func (m *Container) Reset()      { *m = Container{} }
+func (*Container) ProtoMessage() {}
+func (*Container) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{0}
+}
+func (m *Container) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Container.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Container) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Container.Merge(m, src)
+}
+func (m *Container) XXX_Size() int {
+	return m.Size()
+}
+func (m *Container) XXX_DiscardUnknown() {
+	xxx_messageInfo_Container.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Container proto.InternalMessageInfo
 
 type Container_Runtime struct {
 	// Name is the name of the runtime.
 	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Options specify additional runtime initialization options.
-	Options *google_protobuf1.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	Options              *types.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
 }
 
-func (m *Container_Runtime) Reset()                    { *m = Container_Runtime{} }
-func (*Container_Runtime) ProtoMessage()               {}
-func (*Container_Runtime) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0, 1} }
+func (m *Container_Runtime) Reset()      { *m = Container_Runtime{} }
+func (*Container_Runtime) ProtoMessage() {}
+func (*Container_Runtime) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{0, 1}
+}
+func (m *Container_Runtime) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Container_Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Container_Runtime.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Container_Runtime) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Container_Runtime.Merge(m, src)
+}
+func (m *Container_Runtime) XXX_Size() int {
+	return m.Size()
+}
+func (m *Container_Runtime) XXX_DiscardUnknown() {
+	xxx_messageInfo_Container_Runtime.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Container_Runtime proto.InternalMessageInfo
 
 type GetContainerRequest struct {
-	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *GetContainerRequest) Reset()                    { *m = GetContainerRequest{} }
-func (*GetContainerRequest) ProtoMessage()               {}
-func (*GetContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{1} }
+func (m *GetContainerRequest) Reset()      { *m = GetContainerRequest{} }
+func (*GetContainerRequest) ProtoMessage() {}
+func (*GetContainerRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{1}
+}
+func (m *GetContainerRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_GetContainerRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *GetContainerRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetContainerRequest.Merge(m, src)
+}
+func (m *GetContainerRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetContainerRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetContainerRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetContainerRequest proto.InternalMessageInfo
 
 type GetContainerResponse struct {
-	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+	Container            Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *GetContainerResponse) Reset()                    { *m = GetContainerResponse{} }
-func (*GetContainerResponse) ProtoMessage()               {}
-func (*GetContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{2} }
+func (m *GetContainerResponse) Reset()      { *m = GetContainerResponse{} }
+func (*GetContainerResponse) ProtoMessage() {}
+func (*GetContainerResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{2}
+}
+func (m *GetContainerResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_GetContainerResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *GetContainerResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetContainerResponse.Merge(m, src)
+}
+func (m *GetContainerResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetContainerResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetContainerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetContainerResponse proto.InternalMessageInfo
 
 type ListContainersRequest struct {
 	// Filters contains one or more filters using the syntax defined in the
@@ -149,38 +246,160 @@
 	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
 	//
 	// If filters is zero-length or nil, all items will be returned.
-	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+	Filters              []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListContainersRequest) Reset()                    { *m = ListContainersRequest{} }
-func (*ListContainersRequest) ProtoMessage()               {}
-func (*ListContainersRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{3} }
+func (m *ListContainersRequest) Reset()      { *m = ListContainersRequest{} }
+func (*ListContainersRequest) ProtoMessage() {}
+func (*ListContainersRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{3}
+}
+func (m *ListContainersRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListContainersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListContainersRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListContainersRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListContainersRequest.Merge(m, src)
+}
+func (m *ListContainersRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListContainersRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListContainersRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListContainersRequest proto.InternalMessageInfo
 
 type ListContainersResponse struct {
-	Containers []Container `protobuf:"bytes,1,rep,name=containers" json:"containers"`
+	Containers           []Container `protobuf:"bytes,1,rep,name=containers,proto3" json:"containers"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
-func (m *ListContainersResponse) Reset()                    { *m = ListContainersResponse{} }
-func (*ListContainersResponse) ProtoMessage()               {}
-func (*ListContainersResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{4} }
+func (m *ListContainersResponse) Reset()      { *m = ListContainersResponse{} }
+func (*ListContainersResponse) ProtoMessage() {}
+func (*ListContainersResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{4}
+}
+func (m *ListContainersResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListContainersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListContainersResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListContainersResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListContainersResponse.Merge(m, src)
+}
+func (m *ListContainersResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListContainersResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListContainersResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListContainersResponse proto.InternalMessageInfo
 
 type CreateContainerRequest struct {
-	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+	Container            Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *CreateContainerRequest) Reset()                    { *m = CreateContainerRequest{} }
-func (*CreateContainerRequest) ProtoMessage()               {}
-func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{5} }
+func (m *CreateContainerRequest) Reset()      { *m = CreateContainerRequest{} }
+func (*CreateContainerRequest) ProtoMessage() {}
+func (*CreateContainerRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{5}
+}
+func (m *CreateContainerRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateContainerRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateContainerRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateContainerRequest.Merge(m, src)
+}
+func (m *CreateContainerRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateContainerRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateContainerRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateContainerRequest proto.InternalMessageInfo
 
 type CreateContainerResponse struct {
-	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+	Container            Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
 func (m *CreateContainerResponse) Reset()      { *m = CreateContainerResponse{} }
 func (*CreateContainerResponse) ProtoMessage() {}
 func (*CreateContainerResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptorContainers, []int{6}
+	return fileDescriptor_311afb8e15951042, []int{6}
 }
+func (m *CreateContainerResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateContainerResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateContainerResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateContainerResponse.Merge(m, src)
+}
+func (m *CreateContainerResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateContainerResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateContainerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateContainerResponse proto.InternalMessageInfo
 
 // UpdateContainerRequest updates the metadata on one or more container.
 //
@@ -191,44 +410,168 @@
 	// Container provides the target values, as declared by the mask, for the update.
 	//
 	// The ID field must be set.
-	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+	Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"`
 	// UpdateMask specifies which fields to perform the update on. If empty,
 	// the operation applies to all fields.
-	UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+	UpdateMask           *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
 }
 
-func (m *UpdateContainerRequest) Reset()                    { *m = UpdateContainerRequest{} }
-func (*UpdateContainerRequest) ProtoMessage()               {}
-func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{7} }
+func (m *UpdateContainerRequest) Reset()      { *m = UpdateContainerRequest{} }
+func (*UpdateContainerRequest) ProtoMessage() {}
+func (*UpdateContainerRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{7}
+}
+func (m *UpdateContainerRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateContainerRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateContainerRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateContainerRequest.Merge(m, src)
+}
+func (m *UpdateContainerRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateContainerRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateContainerRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateContainerRequest proto.InternalMessageInfo
 
 type UpdateContainerResponse struct {
-	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+	Container            Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
 func (m *UpdateContainerResponse) Reset()      { *m = UpdateContainerResponse{} }
 func (*UpdateContainerResponse) ProtoMessage() {}
 func (*UpdateContainerResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptorContainers, []int{8}
+	return fileDescriptor_311afb8e15951042, []int{8}
 }
+func (m *UpdateContainerResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateContainerResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateContainerResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateContainerResponse.Merge(m, src)
+}
+func (m *UpdateContainerResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateContainerResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateContainerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateContainerResponse proto.InternalMessageInfo
 
 type DeleteContainerRequest struct {
-	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *DeleteContainerRequest) Reset()                    { *m = DeleteContainerRequest{} }
-func (*DeleteContainerRequest) ProtoMessage()               {}
-func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{9} }
+func (m *DeleteContainerRequest) Reset()      { *m = DeleteContainerRequest{} }
+func (*DeleteContainerRequest) ProtoMessage() {}
+func (*DeleteContainerRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{9}
+}
+func (m *DeleteContainerRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteContainerRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteContainerRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteContainerRequest.Merge(m, src)
+}
+func (m *DeleteContainerRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteContainerRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteContainerRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteContainerRequest proto.InternalMessageInfo
 
 type ListContainerMessage struct {
-	Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"`
+	Container            *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
 }
 
-func (m *ListContainerMessage) Reset()                    { *m = ListContainerMessage{} }
-func (*ListContainerMessage) ProtoMessage()               {}
-func (*ListContainerMessage) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{10} }
+func (m *ListContainerMessage) Reset()      { *m = ListContainerMessage{} }
+func (*ListContainerMessage) ProtoMessage() {}
+func (*ListContainerMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_311afb8e15951042, []int{10}
+}
+func (m *ListContainerMessage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListContainerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListContainerMessage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListContainerMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListContainerMessage.Merge(m, src)
+}
+func (m *ListContainerMessage) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListContainerMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListContainerMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListContainerMessage proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Container)(nil), "containerd.services.containers.v1.Container")
+	proto.RegisterMapType((map[string]types.Any)(nil), "containerd.services.containers.v1.Container.ExtensionsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.containers.v1.Container.LabelsEntry")
 	proto.RegisterType((*Container_Runtime)(nil), "containerd.services.containers.v1.Container.Runtime")
 	proto.RegisterType((*GetContainerRequest)(nil), "containerd.services.containers.v1.GetContainerRequest")
 	proto.RegisterType((*GetContainerResponse)(nil), "containerd.services.containers.v1.GetContainerResponse")
@@ -242,6 +585,66 @@
 	proto.RegisterType((*ListContainerMessage)(nil), "containerd.services.containers.v1.ListContainerMessage")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/containers/v1/containers.proto", fileDescriptor_311afb8e15951042)
+}
+
+var fileDescriptor_311afb8e15951042 = []byte{
+	// 820 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x6e, 0x13, 0x49,
+	0x14, 0x75, 0xdb, 0x4e, 0x3b, 0xbe, 0x1e, 0x69, 0x46, 0x35, 0x1e, 0x4f, 0x4f, 0x8f, 0x64, 0x3b,
+	0x5e, 0x59, 0xa3, 0xa1, 0x9d, 0x18, 0x44, 0x5e, 0x6c, 0xe2, 0xbc, 0x04, 0x24, 0x28, 0xea, 0x80,
+	0x84, 0x60, 0x11, 0xda, 0x76, 0xc5, 0x69, 0xdc, 0x2f, 0xba, 0xca, 0x16, 0x16, 0x8b, 0xc0, 0x1f,
+	0xb0, 0xe3, 0x13, 0xf8, 0x95, 0x2c, 0x59, 0xb2, 0x0a, 0xc4, 0xe2, 0x43, 0x50, 0x57, 0x57, 0xbb,
+	0x3b, 0x7e, 0x80, 0x9d, 0x90, 0x5d, 0x5d, 0xd7, 0x3d, 0xf7, 0x9e, 0x3a, 0xb7, 0x4e, 0xb9, 0x61,
+	0xaf, 0xa5, 0xd3, 0x93, 0x4e, 0x5d, 0x69, 0xd8, 0x66, 0xa5, 0x61, 0x5b, 0x54, 0xd3, 0x2d, 0xec,
+	0x36, 0xa3, 0x4b, 0xcd, 0xd1, 0x2b, 0x04, 0xbb, 0x5d, 0xbd, 0x81, 0x49, 0xf8, 0x3b, 0xa9, 0x74,
+	0x97, 0x22, 0x91, 0xe2, 0xb8, 0x36, 0xb5, 0xd1, 0x42, 0x88, 0x53, 0x02, 0x8c, 0x12, 0xc9, 0xea,
+	0x2e, 0xc9, 0xd9, 0x96, 0xdd, 0xb2, 0x59, 0x76, 0xc5, 0x5b, 0xf9, 0x40, 0xf9, 0x9f, 0x96, 0x6d,
+	0xb7, 0x0c, 0x5c, 0x61, 0x51, 0xbd, 0x73, 0x5c, 0xd1, 0xac, 0x1e, 0xdf, 0xfa, 0x77, 0x78, 0x0b,
+	0x9b, 0x0e, 0x0d, 0x36, 0x8b, 0xc3, 0x9b, 0xc7, 0x3a, 0x36, 0x9a, 0x47, 0xa6, 0x46, 0xda, 0x3c,
+	0xa3, 0x30, 0x9c, 0x41, 0x75, 0x13, 0x13, 0xaa, 0x99, 0x8e, 0x9f, 0x50, 0xfa, 0x20, 0x42, 0x7a,
+	0x33, 0xa0, 0x88, 0x72, 0x10, 0xd7, 0x9b, 0x92, 0x50, 0x14, 0xca, 0xe9, 0x9a, 0xd8, 0x3f, 0x2f,
+	0xc4, 0xef, 0x6f, 0xa9, 0x71, 0xbd, 0x89, 0x0e, 0x40, 0x34, 0xb4, 0x3a, 0x36, 0x88, 0x14, 0x2f,
+	0x26, 0xca, 0x99, 0xea, 0x8a, 0xf2, 0xd3, 0xa3, 0x2a, 0x83, 0xaa, 0xca, 0x1e, 0x83, 0x6e, 0x5b,
+	0xd4, 0xed, 0xa9, 0xbc, 0x0e, 0xca, 0xc2, 0x9c, 0x6e, 0x6a, 0x2d, 0x2c, 0x25, 0xbc, 0x66, 0xaa,
+	0x1f, 0xa0, 0x47, 0x90, 0x72, 0x3b, 0x96, 0xc7, 0x51, 0x4a, 0x16, 0x85, 0x72, 0xa6, 0x7a, 0x67,
+	0xa6, 0x46, 0xaa, 0x8f, 0x55, 0x83, 0x22, 0xa8, 0x0c, 0x49, 0xe2, 0xe0, 0x86, 0x34, 0xc7, 0x8a,
+	0x65, 0x15, 0x5f, 0x0d, 0x25, 0x50, 0x43, 0xd9, 0xb0, 0x7a, 0x2a, 0xcb, 0x40, 0x45, 0xc8, 0x10,
+	0x4b, 0x73, 0xc8, 0x89, 0x4d, 0x29, 0x76, 0x25, 0x91, 0xb1, 0x8a, 0xfe, 0x84, 0x16, 0xe0, 0xb7,
+	0x20, 0x3c, 0x6a, 0xe3, 0x9e, 0x94, 0xba, 0x9c, 0xf2, 0x10, 0xf7, 0xd0, 0x26, 0x40, 0xc3, 0xc5,
+	0x1a, 0xc5, 0xcd, 0x23, 0x8d, 0x4a, 0xf3, 0xac, 0xa9, 0x3c, 0xd2, 0xf4, 0x71, 0x30, 0x82, 0xda,
+	0xfc, 0xd9, 0x79, 0x21, 0xf6, 0xfe, 0x4b, 0x41, 0x50, 0xd3, 0x1c, 0xb7, 0x41, 0xbd, 0x22, 0x1d,
+	0xa7, 0x19, 0x14, 0x49, 0xcf, 0x52, 0x84, 0xe3, 0x36, 0x28, 0xaa, 0x03, 0xe0, 0xd7, 0x14, 0x5b,
+	0x44, 0xb7, 0x2d, 0x22, 0x01, 0x1b, 0xda, 0xbd, 0x99, 0xb4, 0xdc, 0x1e, 0xc0, 0xd9, 0xe0, 0x6a,
+	0x49, 0xaf, 0x8d, 0x1a, 0xa9, 0x2a, 0xaf, 0x42, 0x26, 0x32, 0x59, 0xf4, 0x07, 0x24, 0x3c, 0x59,
+	0xd8, 0xe5, 0x51, 0xbd, 0xa5, 0x37, 0xe3, 0xae, 0x66, 0x74, 0xb0, 0x14, 0xf7, 0x67, 0xcc, 0x82,
+	0xb5, 0xf8, 0x8a, 0x20, 0xef, 0x43, 0x8a, 0xcf, 0x0a, 0x21, 0x48, 0x5a, 0x9a, 0x89, 0x39, 0x8e,
+	0xad, 0x91, 0x02, 0x29, 0xdb, 0xa1, 0x8c, 0x7a, 0xfc, 0x07, 0x93, 0x0b, 0x92, 0xe4, 0x43, 0xf8,
+	0x7d, 0x88, 0xee, 0x18, 0x36, 0xff, 0x45, 0xd9, 0x4c, 0x2a, 0x19, 0x72, 0x2c, 0xdd, 0x82, 0x3f,
+	0x77, 0x31, 0x1d, 0x08, 0xa2, 0xe2, 0x57, 0x1d, 0x4c, 0xe8, 0x24, 0x8b, 0x94, 0x4e, 0x20, 0x7b,
+	0x39, 0x9d, 0x38, 0xb6, 0x45, 0x30, 0x3a, 0x80, 0xf4, 0x40, 0x62, 0x06, 0xcb, 0x54, 0xff, 0x9f,
+	0x65, 0x10, 0x5c, 0xf8, 0xb0, 0x48, 0x69, 0x09, 0xfe, 0xda, 0xd3, 0x49, 0xd8, 0x8a, 0x04, 0xd4,
+	0x24, 0x48, 0x1d, 0xeb, 0x06, 0xc5, 0x2e, 0x91, 0x84, 0x62, 0xa2, 0x9c, 0x56, 0x83, 0xb0, 0x64,
+	0x40, 0x6e, 0x18, 0xc2, 0xe9, 0xa9, 0x00, 0x61, 0x63, 0x06, 0xbb, 0x1a, 0xbf, 0x48, 0x95, 0xd2,
+	0x4b, 0xc8, 0x6d, 0xb2, 0xeb, 0x3c, 0x22, 0xde, 0xaf, 0x17, 0xa3, 0x0d, 0x7f, 0x8f, 0xf4, 0xba,
+	0x31, 0xe5, 0x3f, 0x0a, 0x90, 0x7b, 0xc2, 0x3c, 0x76, 0xf3, 0x27, 0x43, 0xeb, 0x90, 0xf1, 0xfd,
+	0xcc, 0xde, 0x73, 0x7e, 0x6b, 0x47, 0x1f, 0x82, 0x1d, 0xef, 0xc9, 0xdf, 0xd7, 0x48, 0x5b, 0xe5,
+	0xcf, 0x86, 0xb7, 0xf6, 0x64, 0x19, 0x21, 0x7a, 0x63, 0xb2, 0x2c, 0x42, 0x6e, 0x0b, 0x1b, 0x78,
+	0x8c, 0x2a, 0x93, 0xcc, 0x52, 0x87, 0xec, 0xa5, 0xfb, 0xb8, 0x8f, 0x09, 0xf1, 0xde, 0xff, 0x07,
+	0xd7, 0xe4, 0x16, 0x61, 0x55, 0xfd, 0x36, 0x07, 0x10, 0x5e, 0x78, 0xd4, 0x85, 0xc4, 0x2e, 0xa6,
+	0xe8, 0xee, 0x14, 0xe5, 0xc6, 0xd8, 0x5e, 0x5e, 0x9e, 0x19, 0xc7, 0xe5, 0x7e, 0x03, 0x49, 0xef,
+	0xa8, 0x68, 0x9a, 0xbf, 0xcc, 0xb1, 0xb6, 0x96, 0x57, 0xaf, 0x80, 0xe4, 0xcd, 0xdf, 0x09, 0x00,
+	0xde, 0xd6, 0x21, 0x75, 0xb1, 0x66, 0x5e, 0x83, 0xc3, 0xf2, 0xac, 0x48, 0x3e, 0xd1, 0x45, 0x01,
+	0x9d, 0x82, 0xe8, 0x3b, 0x14, 0x4d, 0x73, 0x90, 0xf1, 0x0f, 0x87, 0xbc, 0x76, 0x15, 0x28, 0x17,
+	0xe1, 0x14, 0x44, 0xdf, 0x0b, 0x53, 0x11, 0x18, 0xef, 0xef, 0xa9, 0x08, 0x4c, 0x72, 0xdc, 0x73,
+	0x10, 0x7d, 0x7f, 0x4c, 0x45, 0x60, 0xbc, 0x95, 0xe4, 0xdc, 0x88, 0xf3, 0xb7, 0xbd, 0x2f, 0xc1,
+	0xda, 0x8b, 0xb3, 0x8b, 0x7c, 0xec, 0xf3, 0x45, 0x3e, 0xf6, 0xb6, 0x9f, 0x17, 0xce, 0xfa, 0x79,
+	0xe1, 0x53, 0x3f, 0x2f, 0x7c, 0xed, 0xe7, 0x85, 0x67, 0x3b, 0xd7, 0xf8, 0xb8, 0x5d, 0x0f, 0xa3,
+	0xa7, 0xb1, 0xba, 0xc8, 0x7a, 0xde, 0xfe, 0x1e, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xae, 0xca, 0xcb,
+	0x2f, 0x0b, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -250,15 +653,16 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Containers service
-
+// ContainersClient is the client API for Containers service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type ContainersClient interface {
 	Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error)
 	List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error)
 	ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error)
 	Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error)
 	Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error)
-	Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*types.Empty, error)
 }
 
 type containersClient struct {
@@ -271,7 +675,7 @@
 
 func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) {
 	out := new(GetContainerResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -280,7 +684,7 @@
 
 func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) {
 	out := new(ListContainersResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -288,7 +692,7 @@
 }
 
 func (c *containersClient) ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Containers_serviceDesc.Streams[0], c.cc, "/containerd.services.containers.v1.Containers/ListStream", opts...)
+	stream, err := c.cc.NewStream(ctx, &_Containers_serviceDesc.Streams[0], "/containerd.services.containers.v1.Containers/ListStream", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -321,7 +725,7 @@
 
 func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) {
 	out := new(CreateContainerResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -330,31 +734,30 @@
 
 func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) {
 	out := new(UpdateContainerResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
-	out := new(google_protobuf2.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, c.cc, opts...)
+func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*types.Empty, error) {
+	out := new(types.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Containers service
-
+// ContainersServer is the server API for Containers service.
 type ContainersServer interface {
 	Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error)
 	List(context.Context, *ListContainersRequest) (*ListContainersResponse, error)
 	ListStream(*ListContainersRequest, Containers_ListStreamServer) error
 	Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
 	Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
-	Delete(context.Context, *DeleteContainerRequest) (*google_protobuf2.Empty, error)
+	Delete(context.Context, *DeleteContainerRequest) (*types.Empty, error)
 }
 
 func RegisterContainersServer(s *grpc.Server, srv ContainersServer) {
@@ -585,16 +988,16 @@
 	}
 	dAtA[i] = 0x42
 	i++
-	i = encodeVarintContainers(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt)))
-	n3, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n3
 	dAtA[i] = 0x4a
 	i++
-	i = encodeVarintContainers(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt)))
-	n4, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -625,6 +1028,9 @@
 			i += n5
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -659,6 +1065,9 @@
 		}
 		i += n6
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -683,6 +1092,9 @@
 		i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -709,6 +1121,9 @@
 		return 0, err
 	}
 	i += n7
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -742,6 +1157,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -772,6 +1190,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -798,6 +1219,9 @@
 		return 0, err
 	}
 	i += n8
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -824,6 +1248,9 @@
 		return 0, err
 	}
 	i += n9
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -860,6 +1287,9 @@
 		}
 		i += n11
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -886,6 +1316,9 @@
 		return 0, err
 	}
 	i += n12
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -910,6 +1343,9 @@
 		i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -938,6 +1374,9 @@
 		}
 		i += n13
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -951,6 +1390,9 @@
 	return offset + 1
 }
 func (m *Container) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -985,9 +1427,9 @@
 	if l > 0 {
 		n += 1 + l + sovContainers(uint64(l))
 	}
-	l = types.SizeOfStdTime(m.CreatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
 	n += 1 + l + sovContainers(uint64(l))
-	l = types.SizeOfStdTime(m.UpdatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
 	n += 1 + l + sovContainers(uint64(l))
 	if len(m.Extensions) > 0 {
 		for k, v := range m.Extensions {
@@ -998,10 +1440,16 @@
 			n += mapEntrySize + 1 + sovContainers(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *Container_Runtime) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -1012,28 +1460,46 @@
 		l = m.Options.Size()
 		n += 1 + l + sovContainers(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *GetContainerRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovContainers(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *GetContainerResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Container.Size()
 	n += 1 + l + sovContainers(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListContainersRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Filters) > 0 {
@@ -1042,10 +1508,16 @@
 			n += 1 + l + sovContainers(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListContainersResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Containers) > 0 {
@@ -1054,26 +1526,44 @@
 			n += 1 + l + sovContainers(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateContainerRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Container.Size()
 	n += 1 + l + sovContainers(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateContainerResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Container.Size()
 	n += 1 + l + sovContainers(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateContainerRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Container.Size()
@@ -1082,34 +1572,55 @@
 		l = m.UpdateMask.Size()
 		n += 1 + l + sovContainers(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateContainerResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Container.Size()
 	n += 1 + l + sovContainers(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteContainerRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovContainers(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListContainerMessage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Container != nil {
 		l = m.Container.Size()
 		n += 1 + l + sovContainers(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -1134,7 +1645,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -1144,8 +1655,8 @@
 	for k, _ := range this.Extensions {
 		keysForExtensions = append(keysForExtensions, k)
 	}
-	sortkeys.Strings(keysForExtensions)
-	mapStringForExtensions := "map[string]google_protobuf1.Any{"
+	github_com_gogo_protobuf_sortkeys.Strings(keysForExtensions)
+	mapStringForExtensions := "map[string]types.Any{"
 	for _, k := range keysForExtensions {
 		mapStringForExtensions += fmt.Sprintf("%v: %v,", k, this.Extensions[k])
 	}
@@ -1155,12 +1666,13 @@
 		`Labels:` + mapStringForLabels + `,`,
 		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
 		`Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "Container_Runtime", "Container_Runtime", 1) + `,`,
-		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types.Any", 1) + `,`,
 		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
 		`SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`,
-		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
-		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`Extensions:` + mapStringForExtensions + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1171,7 +1683,8 @@
 	}
 	s := strings.Join([]string{`&Container_Runtime{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1182,6 +1695,7 @@
 	}
 	s := strings.Join([]string{`&GetContainerRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1192,6 +1706,7 @@
 	}
 	s := strings.Join([]string{`&GetContainerResponse{`,
 		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1202,6 +1717,7 @@
 	}
 	s := strings.Join([]string{`&ListContainersRequest{`,
 		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1212,6 +1728,7 @@
 	}
 	s := strings.Join([]string{`&ListContainersResponse{`,
 		`Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1222,6 +1739,7 @@
 	}
 	s := strings.Join([]string{`&CreateContainerRequest{`,
 		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1232,6 +1750,7 @@
 	}
 	s := strings.Join([]string{`&CreateContainerResponse{`,
 		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1242,7 +1761,8 @@
 	}
 	s := strings.Join([]string{`&UpdateContainerRequest{`,
 		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
-		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf3.FieldMask", 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1253,6 +1773,7 @@
 	}
 	s := strings.Join([]string{`&UpdateContainerResponse{`,
 		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1263,6 +1784,7 @@
 	}
 	s := strings.Join([]string{`&DeleteContainerRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1273,6 +1795,7 @@
 	}
 	s := strings.Join([]string{`&ListContainerMessage{`,
 		`Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "Container", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1300,7 +1823,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1328,7 +1851,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1338,6 +1861,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1357,7 +1883,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1366,6 +1892,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1386,7 +1915,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1403,7 +1932,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1413,6 +1942,9 @@
 						return ErrInvalidLengthContainers
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthContainers
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1429,7 +1961,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1439,6 +1971,9 @@
 						return ErrInvalidLengthContainers
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthContainers
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1475,7 +2010,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1485,6 +2020,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1504,7 +2042,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1513,6 +2051,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1537,7 +2078,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1546,11 +2087,14 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Spec == nil {
-				m.Spec = &google_protobuf1.Any{}
+				m.Spec = &types.Any{}
 			}
 			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -1570,7 +2114,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1580,6 +2124,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1599,7 +2146,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1609,6 +2156,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1628,7 +2178,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1637,10 +2187,13 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -1658,7 +2211,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1667,10 +2220,13 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -1688,7 +2244,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1697,14 +2253,17 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Extensions == nil {
-				m.Extensions = make(map[string]google_protobuf1.Any)
+				m.Extensions = make(map[string]types.Any)
 			}
 			var mapkey string
-			mapvalue := &google_protobuf1.Any{}
+			mapvalue := &types.Any{}
 			for iNdEx < postIndex {
 				entryPreIndex := iNdEx
 				var wire uint64
@@ -1717,7 +2276,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1734,7 +2293,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1744,6 +2303,9 @@
 						return ErrInvalidLengthContainers
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthContainers
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1760,7 +2322,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						mapmsglen |= (int(b) & 0x7F) << shift
+						mapmsglen |= int(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1769,13 +2331,13 @@
 						return ErrInvalidLengthContainers
 					}
 					postmsgIndex := iNdEx + mapmsglen
-					if mapmsglen < 0 {
+					if postmsgIndex < 0 {
 						return ErrInvalidLengthContainers
 					}
 					if postmsgIndex > l {
 						return io.ErrUnexpectedEOF
 					}
-					mapvalue = &google_protobuf1.Any{}
+					mapvalue = &types.Any{}
 					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
 						return err
 					}
@@ -1806,9 +2368,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1833,7 +2399,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1861,7 +2427,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1871,6 +2437,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1890,7 +2459,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1899,11 +2468,14 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Options == nil {
-				m.Options = &google_protobuf1.Any{}
+				m.Options = &types.Any{}
 			}
 			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -1918,9 +2490,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1945,7 +2521,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1973,7 +2549,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1983,6 +2559,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1997,9 +2576,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2024,7 +2607,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2052,7 +2635,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2061,6 +2644,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2077,9 +2663,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2104,7 +2694,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2132,7 +2722,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2142,6 +2732,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2156,9 +2749,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2183,7 +2780,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2211,7 +2808,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2220,6 +2817,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2237,9 +2837,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2264,7 +2868,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2292,7 +2896,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2301,6 +2905,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2317,9 +2924,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2344,7 +2955,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2372,7 +2983,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2381,6 +2992,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2397,9 +3011,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2424,7 +3042,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2452,7 +3070,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2461,6 +3079,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2482,7 +3103,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2491,11 +3112,14 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.UpdateMask == nil {
-				m.UpdateMask = &google_protobuf3.FieldMask{}
+				m.UpdateMask = &types.FieldMask{}
 			}
 			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -2510,9 +3134,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2537,7 +3165,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2565,7 +3193,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2574,6 +3202,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2590,9 +3221,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2617,7 +3252,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2645,7 +3280,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2655,6 +3290,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2669,9 +3307,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2696,7 +3338,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2724,7 +3366,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2733,6 +3375,9 @@
 				return ErrInvalidLengthContainers
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2752,9 +3397,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContainers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContainers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2818,10 +3467,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthContainers
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthContainers
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -2850,6 +3502,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthContainers
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -2868,63 +3523,3 @@
 	ErrInvalidLengthContainers = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowContainers   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/containers/v1/containers.proto", fileDescriptorContainers)
-}
-
-var fileDescriptorContainers = []byte{
-	// 820 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x6e, 0x13, 0x49,
-	0x14, 0x75, 0xdb, 0x4e, 0x3b, 0xbe, 0x1e, 0x69, 0x46, 0x35, 0x1e, 0x4f, 0x4f, 0x8f, 0x64, 0x3b,
-	0x5e, 0x59, 0xa3, 0xa1, 0x9d, 0x18, 0x44, 0x5e, 0x6c, 0xe2, 0xbc, 0x04, 0x24, 0x28, 0xea, 0x80,
-	0x84, 0x60, 0x11, 0xda, 0x76, 0xc5, 0x69, 0xdc, 0x2f, 0xba, 0xca, 0x16, 0x16, 0x8b, 0xc0, 0x1f,
-	0xb0, 0xe3, 0x13, 0xf8, 0x95, 0x2c, 0x59, 0xb2, 0x0a, 0xc4, 0xe2, 0x43, 0x50, 0x57, 0x57, 0xbb,
-	0x3b, 0x7e, 0x80, 0x9d, 0x90, 0x5d, 0x5d, 0xd7, 0x3d, 0xf7, 0x9e, 0x3a, 0xb7, 0x4e, 0xb9, 0x61,
-	0xaf, 0xa5, 0xd3, 0x93, 0x4e, 0x5d, 0x69, 0xd8, 0x66, 0xa5, 0x61, 0x5b, 0x54, 0xd3, 0x2d, 0xec,
-	0x36, 0xa3, 0x4b, 0xcd, 0xd1, 0x2b, 0x04, 0xbb, 0x5d, 0xbd, 0x81, 0x49, 0xf8, 0x3b, 0xa9, 0x74,
-	0x97, 0x22, 0x91, 0xe2, 0xb8, 0x36, 0xb5, 0xd1, 0x42, 0x88, 0x53, 0x02, 0x8c, 0x12, 0xc9, 0xea,
-	0x2e, 0xc9, 0xd9, 0x96, 0xdd, 0xb2, 0x59, 0x76, 0xc5, 0x5b, 0xf9, 0x40, 0xf9, 0x9f, 0x96, 0x6d,
-	0xb7, 0x0c, 0x5c, 0x61, 0x51, 0xbd, 0x73, 0x5c, 0xd1, 0xac, 0x1e, 0xdf, 0xfa, 0x77, 0x78, 0x0b,
-	0x9b, 0x0e, 0x0d, 0x36, 0x8b, 0xc3, 0x9b, 0xc7, 0x3a, 0x36, 0x9a, 0x47, 0xa6, 0x46, 0xda, 0x3c,
-	0xa3, 0x30, 0x9c, 0x41, 0x75, 0x13, 0x13, 0xaa, 0x99, 0x8e, 0x9f, 0x50, 0xfa, 0x20, 0x42, 0x7a,
-	0x33, 0xa0, 0x88, 0x72, 0x10, 0xd7, 0x9b, 0x92, 0x50, 0x14, 0xca, 0xe9, 0x9a, 0xd8, 0x3f, 0x2f,
-	0xc4, 0xef, 0x6f, 0xa9, 0x71, 0xbd, 0x89, 0x0e, 0x40, 0x34, 0xb4, 0x3a, 0x36, 0x88, 0x14, 0x2f,
-	0x26, 0xca, 0x99, 0xea, 0x8a, 0xf2, 0xd3, 0xa3, 0x2a, 0x83, 0xaa, 0xca, 0x1e, 0x83, 0x6e, 0x5b,
-	0xd4, 0xed, 0xa9, 0xbc, 0x0e, 0xca, 0xc2, 0x9c, 0x6e, 0x6a, 0x2d, 0x2c, 0x25, 0xbc, 0x66, 0xaa,
-	0x1f, 0xa0, 0x47, 0x90, 0x72, 0x3b, 0x96, 0xc7, 0x51, 0x4a, 0x16, 0x85, 0x72, 0xa6, 0x7a, 0x67,
-	0xa6, 0x46, 0xaa, 0x8f, 0x55, 0x83, 0x22, 0xa8, 0x0c, 0x49, 0xe2, 0xe0, 0x86, 0x34, 0xc7, 0x8a,
-	0x65, 0x15, 0x5f, 0x0d, 0x25, 0x50, 0x43, 0xd9, 0xb0, 0x7a, 0x2a, 0xcb, 0x40, 0x45, 0xc8, 0x10,
-	0x4b, 0x73, 0xc8, 0x89, 0x4d, 0x29, 0x76, 0x25, 0x91, 0xb1, 0x8a, 0xfe, 0x84, 0x16, 0xe0, 0xb7,
-	0x20, 0x3c, 0x6a, 0xe3, 0x9e, 0x94, 0xba, 0x9c, 0xf2, 0x10, 0xf7, 0xd0, 0x26, 0x40, 0xc3, 0xc5,
-	0x1a, 0xc5, 0xcd, 0x23, 0x8d, 0x4a, 0xf3, 0xac, 0xa9, 0x3c, 0xd2, 0xf4, 0x71, 0x30, 0x82, 0xda,
-	0xfc, 0xd9, 0x79, 0x21, 0xf6, 0xfe, 0x4b, 0x41, 0x50, 0xd3, 0x1c, 0xb7, 0x41, 0xbd, 0x22, 0x1d,
-	0xa7, 0x19, 0x14, 0x49, 0xcf, 0x52, 0x84, 0xe3, 0x36, 0x28, 0xaa, 0x03, 0xe0, 0xd7, 0x14, 0x5b,
-	0x44, 0xb7, 0x2d, 0x22, 0x01, 0x1b, 0xda, 0xbd, 0x99, 0xb4, 0xdc, 0x1e, 0xc0, 0xd9, 0xe0, 0x6a,
-	0x49, 0xaf, 0x8d, 0x1a, 0xa9, 0x2a, 0xaf, 0x42, 0x26, 0x32, 0x59, 0xf4, 0x07, 0x24, 0x3c, 0x59,
-	0xd8, 0xe5, 0x51, 0xbd, 0xa5, 0x37, 0xe3, 0xae, 0x66, 0x74, 0xb0, 0x14, 0xf7, 0x67, 0xcc, 0x82,
-	0xb5, 0xf8, 0x8a, 0x20, 0xef, 0x43, 0x8a, 0xcf, 0x0a, 0x21, 0x48, 0x5a, 0x9a, 0x89, 0x39, 0x8e,
-	0xad, 0x91, 0x02, 0x29, 0xdb, 0xa1, 0x8c, 0x7a, 0xfc, 0x07, 0x93, 0x0b, 0x92, 0xe4, 0x43, 0xf8,
-	0x7d, 0x88, 0xee, 0x18, 0x36, 0xff, 0x45, 0xd9, 0x4c, 0x2a, 0x19, 0x72, 0x2c, 0xdd, 0x82, 0x3f,
-	0x77, 0x31, 0x1d, 0x08, 0xa2, 0xe2, 0x57, 0x1d, 0x4c, 0xe8, 0x24, 0x8b, 0x94, 0x4e, 0x20, 0x7b,
-	0x39, 0x9d, 0x38, 0xb6, 0x45, 0x30, 0x3a, 0x80, 0xf4, 0x40, 0x62, 0x06, 0xcb, 0x54, 0xff, 0x9f,
-	0x65, 0x10, 0x5c, 0xf8, 0xb0, 0x48, 0x69, 0x09, 0xfe, 0xda, 0xd3, 0x49, 0xd8, 0x8a, 0x04, 0xd4,
-	0x24, 0x48, 0x1d, 0xeb, 0x06, 0xc5, 0x2e, 0x91, 0x84, 0x62, 0xa2, 0x9c, 0x56, 0x83, 0xb0, 0x64,
-	0x40, 0x6e, 0x18, 0xc2, 0xe9, 0xa9, 0x00, 0x61, 0x63, 0x06, 0xbb, 0x1a, 0xbf, 0x48, 0x95, 0xd2,
-	0x4b, 0xc8, 0x6d, 0xb2, 0xeb, 0x3c, 0x22, 0xde, 0xaf, 0x17, 0xa3, 0x0d, 0x7f, 0x8f, 0xf4, 0xba,
-	0x31, 0xe5, 0x3f, 0x0a, 0x90, 0x7b, 0xc2, 0x3c, 0x76, 0xf3, 0x27, 0x43, 0xeb, 0x90, 0xf1, 0xfd,
-	0xcc, 0xde, 0x73, 0x7e, 0x6b, 0x47, 0x1f, 0x82, 0x1d, 0xef, 0xc9, 0xdf, 0xd7, 0x48, 0x5b, 0xe5,
-	0xcf, 0x86, 0xb7, 0xf6, 0x64, 0x19, 0x21, 0x7a, 0x63, 0xb2, 0x2c, 0x42, 0x6e, 0x0b, 0x1b, 0x78,
-	0x8c, 0x2a, 0x93, 0xcc, 0x52, 0x87, 0xec, 0xa5, 0xfb, 0xb8, 0x8f, 0x09, 0xf1, 0xde, 0xff, 0x07,
-	0xd7, 0xe4, 0x16, 0x61, 0x55, 0xfd, 0x36, 0x07, 0x10, 0x5e, 0x78, 0xd4, 0x85, 0xc4, 0x2e, 0xa6,
-	0xe8, 0xee, 0x14, 0xe5, 0xc6, 0xd8, 0x5e, 0x5e, 0x9e, 0x19, 0xc7, 0xe5, 0x7e, 0x03, 0x49, 0xef,
-	0xa8, 0x68, 0x9a, 0xbf, 0xcc, 0xb1, 0xb6, 0x96, 0x57, 0xaf, 0x80, 0xe4, 0xcd, 0xdf, 0x09, 0x00,
-	0xde, 0xd6, 0x21, 0x75, 0xb1, 0x66, 0x5e, 0x83, 0xc3, 0xf2, 0xac, 0x48, 0x3e, 0xd1, 0x45, 0x01,
-	0x9d, 0x82, 0xe8, 0x3b, 0x14, 0x4d, 0x73, 0x90, 0xf1, 0x0f, 0x87, 0xbc, 0x76, 0x15, 0x28, 0x17,
-	0xe1, 0x14, 0x44, 0xdf, 0x0b, 0x53, 0x11, 0x18, 0xef, 0xef, 0xa9, 0x08, 0x4c, 0x72, 0xdc, 0x73,
-	0x10, 0x7d, 0x7f, 0x4c, 0x45, 0x60, 0xbc, 0x95, 0xe4, 0xdc, 0x88, 0xf3, 0xb7, 0xbd, 0x2f, 0xc1,
-	0xda, 0x8b, 0xb3, 0x8b, 0x7c, 0xec, 0xf3, 0x45, 0x3e, 0xf6, 0xb6, 0x9f, 0x17, 0xce, 0xfa, 0x79,
-	0xe1, 0x53, 0x3f, 0x2f, 0x7c, 0xed, 0xe7, 0x85, 0x67, 0x3b, 0xd7, 0xf8, 0xb8, 0x5d, 0x0f, 0xa3,
-	0xa7, 0xb1, 0xba, 0xc8, 0x7a, 0xde, 0xfe, 0x1e, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xae, 0xca, 0xcb,
-	0x2f, 0x0b, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
index ec08c3b..3e45800 100644
--- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
@@ -1,56 +1,23 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/content/v1/content.proto
 
-/*
-	Package content is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/content/v1/content.proto
-
-	It has these top-level messages:
-		Info
-		InfoRequest
-		InfoResponse
-		UpdateRequest
-		UpdateResponse
-		ListContentRequest
-		ListContentResponse
-		DeleteContentRequest
-		ReadContentRequest
-		ReadContentResponse
-		Status
-		StatusRequest
-		StatusResponse
-		ListStatusesRequest
-		ListStatusesResponse
-		WriteContentRequest
-		WriteContentResponse
-		AbortRequest
-*/
 package content
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import _ "github.com/gogo/protobuf/types"
-import google_protobuf3 "github.com/gogo/protobuf/types"
-
-import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
-import time "time"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types "github.com/gogo/protobuf/types"
+	github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -93,6 +60,7 @@
 	1: "WRITE",
 	2: "COMMIT",
 }
+
 var WriteAction_value = map[string]int32{
 	"STAT":   0,
 	"WRITE":  1,
@@ -102,7 +70,10 @@
 func (x WriteAction) String() string {
 	return proto.EnumName(WriteAction_name, int32(x))
 }
-func (WriteAction) EnumDescriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
+
+func (WriteAction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{0}
+}
 
 type Info struct {
 	// Digest is the hash identity of the blob.
@@ -110,57 +81,212 @@
 	// Size is the total number of bytes in the blob.
 	Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
 	// CreatedAt provides the time at which the blob was committed.
-	CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"`
 	// UpdatedAt provides the time the info was last updated.
-	UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"`
 	// Labels are arbitrary data on snapshots.
 	//
 	// The combined size of a key/value pair cannot exceed 4096 bytes.
-	Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels               map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *Info) Reset()                    { *m = Info{} }
-func (*Info) ProtoMessage()               {}
-func (*Info) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
+func (m *Info) Reset()      { *m = Info{} }
+func (*Info) ProtoMessage() {}
+func (*Info) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{0}
+}
+func (m *Info) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Info.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Info) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Info.Merge(m, src)
+}
+func (m *Info) XXX_Size() int {
+	return m.Size()
+}
+func (m *Info) XXX_DiscardUnknown() {
+	xxx_messageInfo_Info.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Info proto.InternalMessageInfo
 
 type InfoRequest struct {
-	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	Digest               github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
+	XXX_unrecognized     []byte                                     `json:"-"`
+	XXX_sizecache        int32                                      `json:"-"`
 }
 
-func (m *InfoRequest) Reset()                    { *m = InfoRequest{} }
-func (*InfoRequest) ProtoMessage()               {}
-func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{1} }
+func (m *InfoRequest) Reset()      { *m = InfoRequest{} }
+func (*InfoRequest) ProtoMessage() {}
+func (*InfoRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{1}
+}
+func (m *InfoRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InfoRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InfoRequest.Merge(m, src)
+}
+func (m *InfoRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *InfoRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InfoRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InfoRequest proto.InternalMessageInfo
 
 type InfoResponse struct {
-	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+	Info                 Info     `protobuf:"bytes,1,opt,name=info,proto3" json:"info"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *InfoResponse) Reset()                    { *m = InfoResponse{} }
-func (*InfoResponse) ProtoMessage()               {}
-func (*InfoResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{2} }
+func (m *InfoResponse) Reset()      { *m = InfoResponse{} }
+func (*InfoResponse) ProtoMessage() {}
+func (*InfoResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{2}
+}
+func (m *InfoResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InfoResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InfoResponse.Merge(m, src)
+}
+func (m *InfoResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *InfoResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_InfoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InfoResponse proto.InternalMessageInfo
 
 type UpdateRequest struct {
-	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+	Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"`
 	// UpdateMask specifies which fields to perform the update on. If empty,
 	// the operation applies to all fields.
 	//
 	// In info, Digest, Size, and CreatedAt are immutable,
 	// other field may be updated using this mask.
 	// If no mask is provided, all mutable field are updated.
-	UpdateMask *google_protobuf1.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+	UpdateMask           *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
 }
 
-func (m *UpdateRequest) Reset()                    { *m = UpdateRequest{} }
-func (*UpdateRequest) ProtoMessage()               {}
-func (*UpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{3} }
+func (m *UpdateRequest) Reset()      { *m = UpdateRequest{} }
+func (*UpdateRequest) ProtoMessage() {}
+func (*UpdateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{3}
+}
+func (m *UpdateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateRequest.Merge(m, src)
+}
+func (m *UpdateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateRequest proto.InternalMessageInfo
 
 type UpdateResponse struct {
-	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+	Info                 Info     `protobuf:"bytes,1,opt,name=info,proto3" json:"info"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *UpdateResponse) Reset()                    { *m = UpdateResponse{} }
-func (*UpdateResponse) ProtoMessage()               {}
-func (*UpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{4} }
+func (m *UpdateResponse) Reset()      { *m = UpdateResponse{} }
+func (*UpdateResponse) ProtoMessage() {}
+func (*UpdateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{4}
+}
+func (m *UpdateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateResponse.Merge(m, src)
+}
+func (m *UpdateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateResponse proto.InternalMessageInfo
 
 type ListContentRequest struct {
 	// Filters contains one or more filters using the syntax defined in the
@@ -173,29 +299,122 @@
 	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
 	//
 	// If filters is zero-length or nil, all items will be returned.
-	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+	Filters              []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListContentRequest) Reset()                    { *m = ListContentRequest{} }
-func (*ListContentRequest) ProtoMessage()               {}
-func (*ListContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{5} }
+func (m *ListContentRequest) Reset()      { *m = ListContentRequest{} }
+func (*ListContentRequest) ProtoMessage() {}
+func (*ListContentRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{5}
+}
+func (m *ListContentRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListContentRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListContentRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListContentRequest.Merge(m, src)
+}
+func (m *ListContentRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListContentRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListContentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListContentRequest proto.InternalMessageInfo
 
 type ListContentResponse struct {
-	Info []Info `protobuf:"bytes,1,rep,name=info" json:"info"`
+	Info                 []Info   `protobuf:"bytes,1,rep,name=info,proto3" json:"info"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListContentResponse) Reset()                    { *m = ListContentResponse{} }
-func (*ListContentResponse) ProtoMessage()               {}
-func (*ListContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{6} }
+func (m *ListContentResponse) Reset()      { *m = ListContentResponse{} }
+func (*ListContentResponse) ProtoMessage() {}
+func (*ListContentResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{6}
+}
+func (m *ListContentResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListContentResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListContentResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListContentResponse.Merge(m, src)
+}
+func (m *ListContentResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListContentResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListContentResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListContentResponse proto.InternalMessageInfo
 
 type DeleteContentRequest struct {
 	// Digest specifies which content to delete.
-	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	Digest               github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
+	XXX_unrecognized     []byte                                     `json:"-"`
+	XXX_sizecache        int32                                      `json:"-"`
 }
 
-func (m *DeleteContentRequest) Reset()                    { *m = DeleteContentRequest{} }
-func (*DeleteContentRequest) ProtoMessage()               {}
-func (*DeleteContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{7} }
+func (m *DeleteContentRequest) Reset()      { *m = DeleteContentRequest{} }
+func (*DeleteContentRequest) ProtoMessage() {}
+func (*DeleteContentRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{7}
+}
+func (m *DeleteContentRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteContentRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteContentRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteContentRequest.Merge(m, src)
+}
+func (m *DeleteContentRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteContentRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteContentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteContentRequest proto.InternalMessageInfo
 
 // ReadContentRequest defines the fields that make up a request to read a portion of
 // data from a stored object.
@@ -208,67 +427,284 @@
 	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
 	// size is the total size of the read. If zero, the entire blob will be
 	// returned by the service.
-	Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+	Size_                int64    `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ReadContentRequest) Reset()                    { *m = ReadContentRequest{} }
-func (*ReadContentRequest) ProtoMessage()               {}
-func (*ReadContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{8} }
+func (m *ReadContentRequest) Reset()      { *m = ReadContentRequest{} }
+func (*ReadContentRequest) ProtoMessage() {}
+func (*ReadContentRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{8}
+}
+func (m *ReadContentRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ReadContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ReadContentRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ReadContentRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadContentRequest.Merge(m, src)
+}
+func (m *ReadContentRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ReadContentRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReadContentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReadContentRequest proto.InternalMessageInfo
 
 // ReadContentResponse carries byte data for a read request.
 type ReadContentResponse struct {
-	Offset int64  `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
-	Data   []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	Offset               int64    `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
+	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ReadContentResponse) Reset()                    { *m = ReadContentResponse{} }
-func (*ReadContentResponse) ProtoMessage()               {}
-func (*ReadContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{9} }
+func (m *ReadContentResponse) Reset()      { *m = ReadContentResponse{} }
+func (*ReadContentResponse) ProtoMessage() {}
+func (*ReadContentResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{9}
+}
+func (m *ReadContentResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ReadContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ReadContentResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ReadContentResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadContentResponse.Merge(m, src)
+}
+func (m *ReadContentResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ReadContentResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReadContentResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReadContentResponse proto.InternalMessageInfo
 
 type Status struct {
-	StartedAt time.Time                                  `protobuf:"bytes,1,opt,name=started_at,json=startedAt,stdtime" json:"started_at"`
-	UpdatedAt time.Time                                  `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
-	Ref       string                                     `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
-	Offset    int64                                      `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
-	Total     int64                                      `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
-	Expected  github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"`
+	StartedAt            time.Time                                  `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"`
+	UpdatedAt            time.Time                                  `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"`
+	Ref                  string                                     `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	Offset               int64                                      `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	Total                int64                                      `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
+	Expected             github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"`
+	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
+	XXX_unrecognized     []byte                                     `json:"-"`
+	XXX_sizecache        int32                                      `json:"-"`
 }
 
-func (m *Status) Reset()                    { *m = Status{} }
-func (*Status) ProtoMessage()               {}
-func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{10} }
+func (m *Status) Reset()      { *m = Status{} }
+func (*Status) ProtoMessage() {}
+func (*Status) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{10}
+}
+func (m *Status) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Status.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Status) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Status.Merge(m, src)
+}
+func (m *Status) XXX_Size() int {
+	return m.Size()
+}
+func (m *Status) XXX_DiscardUnknown() {
+	xxx_messageInfo_Status.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Status proto.InternalMessageInfo
 
 type StatusRequest struct {
-	Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+	Ref                  string   `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StatusRequest) Reset()                    { *m = StatusRequest{} }
-func (*StatusRequest) ProtoMessage()               {}
-func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{11} }
+func (m *StatusRequest) Reset()      { *m = StatusRequest{} }
+func (*StatusRequest) ProtoMessage() {}
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{11}
+}
+func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatusRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusRequest.Merge(m, src)
+}
+func (m *StatusRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
 
 type StatusResponse struct {
-	Status *Status `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
+	Status               *Status  `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StatusResponse) Reset()                    { *m = StatusResponse{} }
-func (*StatusResponse) ProtoMessage()               {}
-func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{12} }
+func (m *StatusResponse) Reset()      { *m = StatusResponse{} }
+func (*StatusResponse) ProtoMessage() {}
+func (*StatusResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{12}
+}
+func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatusResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusResponse.Merge(m, src)
+}
+func (m *StatusResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
 
 type ListStatusesRequest struct {
-	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+	Filters              []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListStatusesRequest) Reset()                    { *m = ListStatusesRequest{} }
-func (*ListStatusesRequest) ProtoMessage()               {}
-func (*ListStatusesRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{13} }
+func (m *ListStatusesRequest) Reset()      { *m = ListStatusesRequest{} }
+func (*ListStatusesRequest) ProtoMessage() {}
+func (*ListStatusesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{13}
+}
+func (m *ListStatusesRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListStatusesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListStatusesRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListStatusesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListStatusesRequest.Merge(m, src)
+}
+func (m *ListStatusesRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListStatusesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListStatusesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListStatusesRequest proto.InternalMessageInfo
 
 type ListStatusesResponse struct {
-	Statuses []Status `protobuf:"bytes,1,rep,name=statuses" json:"statuses"`
+	Statuses             []Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListStatusesResponse) Reset()                    { *m = ListStatusesResponse{} }
-func (*ListStatusesResponse) ProtoMessage()               {}
-func (*ListStatusesResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{14} }
+func (m *ListStatusesResponse) Reset()      { *m = ListStatusesResponse{} }
+func (*ListStatusesResponse) ProtoMessage() {}
+func (*ListStatusesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{14}
+}
+func (m *ListStatusesResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListStatusesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListStatusesResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListStatusesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListStatusesResponse.Merge(m, src)
+}
+func (m *ListStatusesResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListStatusesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListStatusesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListStatusesResponse proto.InternalMessageInfo
 
 // WriteContentRequest writes data to the request ref at offset.
 type WriteContentRequest struct {
@@ -324,12 +760,43 @@
 	// Labels are arbitrary data on snapshots.
 	//
 	// The combined size of a key/value pair cannot exceed 4096 bytes.
-	Labels map[string]string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels               map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *WriteContentRequest) Reset()                    { *m = WriteContentRequest{} }
-func (*WriteContentRequest) ProtoMessage()               {}
-func (*WriteContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{15} }
+func (m *WriteContentRequest) Reset()      { *m = WriteContentRequest{} }
+func (*WriteContentRequest) ProtoMessage() {}
+func (*WriteContentRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{15}
+}
+func (m *WriteContentRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WriteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WriteContentRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WriteContentRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WriteContentRequest.Merge(m, src)
+}
+func (m *WriteContentRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WriteContentRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WriteContentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WriteContentRequest proto.InternalMessageInfo
 
 // WriteContentResponse is returned on the culmination of a write call.
 type WriteContentResponse struct {
@@ -340,12 +807,12 @@
 	//
 	// This must be set for stat and commit write actions. All other write
 	// actions may omit this.
-	StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,stdtime" json:"started_at"`
+	StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"`
 	// UpdatedAt provides the last time of a successful write.
 	//
 	// This must be set for stat and commit write actions. All other write
 	// actions may omit this.
-	UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"`
 	// Offset is the current committed size for the write.
 	Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
 	// Total provides the current, expected total size of the write.
@@ -358,23 +825,87 @@
 	// Digest, if present, includes the digest up to the currently committed
 	// bytes. If action is commit, this field will be set. It is implementation
 	// defined if this is set for other actions.
-	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	Digest               github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
+	XXX_unrecognized     []byte                                     `json:"-"`
+	XXX_sizecache        int32                                      `json:"-"`
 }
 
-func (m *WriteContentResponse) Reset()                    { *m = WriteContentResponse{} }
-func (*WriteContentResponse) ProtoMessage()               {}
-func (*WriteContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{16} }
+func (m *WriteContentResponse) Reset()      { *m = WriteContentResponse{} }
+func (*WriteContentResponse) ProtoMessage() {}
+func (*WriteContentResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{16}
+}
+func (m *WriteContentResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WriteContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WriteContentResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WriteContentResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WriteContentResponse.Merge(m, src)
+}
+func (m *WriteContentResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *WriteContentResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_WriteContentResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WriteContentResponse proto.InternalMessageInfo
 
 type AbortRequest struct {
-	Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+	Ref                  string   `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *AbortRequest) Reset()                    { *m = AbortRequest{} }
-func (*AbortRequest) ProtoMessage()               {}
-func (*AbortRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{17} }
+func (m *AbortRequest) Reset()      { *m = AbortRequest{} }
+func (*AbortRequest) ProtoMessage() {}
+func (*AbortRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{17}
+}
+func (m *AbortRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AbortRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AbortRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AbortRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AbortRequest.Merge(m, src)
+}
+func (m *AbortRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AbortRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AbortRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AbortRequest proto.InternalMessageInfo
 
 func init() {
+	proto.RegisterEnum("containerd.services.content.v1.WriteAction", WriteAction_name, WriteAction_value)
 	proto.RegisterType((*Info)(nil), "containerd.services.content.v1.Info")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.Info.LabelsEntry")
 	proto.RegisterType((*InfoRequest)(nil), "containerd.services.content.v1.InfoRequest")
 	proto.RegisterType((*InfoResponse)(nil), "containerd.services.content.v1.InfoResponse")
 	proto.RegisterType((*UpdateRequest)(nil), "containerd.services.content.v1.UpdateRequest")
@@ -390,9 +921,85 @@
 	proto.RegisterType((*ListStatusesRequest)(nil), "containerd.services.content.v1.ListStatusesRequest")
 	proto.RegisterType((*ListStatusesResponse)(nil), "containerd.services.content.v1.ListStatusesResponse")
 	proto.RegisterType((*WriteContentRequest)(nil), "containerd.services.content.v1.WriteContentRequest")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.WriteContentRequest.LabelsEntry")
 	proto.RegisterType((*WriteContentResponse)(nil), "containerd.services.content.v1.WriteContentResponse")
 	proto.RegisterType((*AbortRequest)(nil), "containerd.services.content.v1.AbortRequest")
-	proto.RegisterEnum("containerd.services.content.v1.WriteAction", WriteAction_name, WriteAction_value)
+}
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/content/v1/content.proto", fileDescriptor_468430ba3e400391)
+}
+
+var fileDescriptor_468430ba3e400391 = []byte{
+	// 1081 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45,
+	0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42,
+	0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b,
+	0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17,
+	0x24, 0x14, 0xf5, 0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68,
+	0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99,
+	0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d,
+	0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d,
+	0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5,
+	0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58,
+	0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14,
+	0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26,
+	0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc,
+	0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9,
+	0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e,
+	0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09,
+	0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4,
+	0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69,
+	0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35,
+	0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3,
+	0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf,
+	0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7,
+	0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9,
+	0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 0x07, 0xf1, 0xf7, 0x90, 0xf1, 0x25, 0xa4,
+	0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3,
+	0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59,
+	0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58,
+	0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b,
+	0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c,
+	0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3,
+	0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f,
+	0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59,
+	0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50,
+	0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a,
+	0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc,
+	0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6,
+	0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57,
+	0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26,
+	0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1,
+	0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90,
+	0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2,
+	0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20,
+	0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1,
+	0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70,
+	0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f,
+	0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb,
+	0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34,
+	0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 0xbe, 0x94, 0xac, 0x8c, 0x0e,
+	0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9,
+	0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28,
+	0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16,
+	0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39,
+	0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7,
+	0xb5, 0x37, 0x4e, 0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3,
+	0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c,
+	0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d,
+	0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8,
+	0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b,
+	0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb,
+	0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 0x26, 0xdf, 0x19,
+	0xd1, 0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3,
+	0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4,
+	0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c,
+	0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42,
+	0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7,
+	0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd,
+	0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff,
+	0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -403,8 +1010,9 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Content service
-
+// ContentClient is the client API for Content service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type ContentClient interface {
 	// Info returns information about a committed object.
 	//
@@ -425,7 +1033,7 @@
 	// set.
 	List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error)
 	// Delete will delete the referenced object.
-	Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
+	Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error)
 	// Read allows one to read an object based on the offset into the content.
 	//
 	// The requested data may be returned in one or more messages.
@@ -458,7 +1066,7 @@
 	Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error)
 	// Abort cancels the ongoing write named in the request. Any resources
 	// associated with the write will be collected.
-	Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
+	Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error)
 }
 
 type contentClient struct {
@@ -471,7 +1079,7 @@
 
 func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) {
 	out := new(InfoResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -480,7 +1088,7 @@
 
 func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) {
 	out := new(UpdateResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -488,7 +1096,7 @@
 }
 
 func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[0], c.cc, "/containerd.services.content.v1.Content/List", opts...)
+	stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[0], "/containerd.services.content.v1.Content/List", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -519,9 +1127,9 @@
 	return m, nil
 }
 
-func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
-	out := new(google_protobuf3.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, c.cc, opts...)
+func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) {
+	out := new(types.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -529,7 +1137,7 @@
 }
 
 func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[1], c.cc, "/containerd.services.content.v1.Content/Read", opts...)
+	stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[1], "/containerd.services.content.v1.Content/Read", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -562,7 +1170,7 @@
 
 func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
 	out := new(StatusResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -571,7 +1179,7 @@
 
 func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) {
 	out := new(ListStatusesResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -579,7 +1187,7 @@
 }
 
 func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[2], c.cc, "/containerd.services.content.v1.Content/Write", opts...)
+	stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[2], "/containerd.services.content.v1.Content/Write", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -609,17 +1217,16 @@
 	return m, nil
 }
 
-func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
-	out := new(google_protobuf3.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, c.cc, opts...)
+func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) {
+	out := new(types.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Content service
-
+// ContentServer is the server API for Content service.
 type ContentServer interface {
 	// Info returns information about a committed object.
 	//
@@ -640,7 +1247,7 @@
 	// set.
 	List(*ListContentRequest, Content_ListServer) error
 	// Delete will delete the referenced object.
-	Delete(context.Context, *DeleteContentRequest) (*google_protobuf3.Empty, error)
+	Delete(context.Context, *DeleteContentRequest) (*types.Empty, error)
 	// Read allows one to read an object based on the offset into the content.
 	//
 	// The requested data may be returned in one or more messages.
@@ -673,7 +1280,7 @@
 	Write(Content_WriteServer) error
 	// Abort cancels the ongoing write named in the request. Any resources
 	// associated with the write will be collected.
-	Abort(context.Context, *AbortRequest) (*google_protobuf3.Empty, error)
+	Abort(context.Context, *AbortRequest) (*types.Empty, error)
 }
 
 func RegisterContentServer(s *grpc.Server, srv ContentServer) {
@@ -934,16 +1541,16 @@
 	}
 	dAtA[i] = 0x1a
 	i++
-	i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt)))
-	n1, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n1
 	dAtA[i] = 0x22
 	i++
-	i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt)))
-	n2, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -965,6 +1572,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -989,6 +1599,9 @@
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
 		i += copy(dAtA[i:], m.Digest)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1015,6 +1628,9 @@
 		return 0, err
 	}
 	i += n3
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1051,6 +1667,9 @@
 		}
 		i += n5
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1077,6 +1696,9 @@
 		return 0, err
 	}
 	i += n6
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1110,6 +1732,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1140,6 +1765,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1164,6 +1792,9 @@
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
 		i += copy(dAtA[i:], m.Digest)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1198,6 +1829,9 @@
 		i++
 		i = encodeVarintContent(dAtA, i, uint64(m.Size_))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1227,6 +1861,9 @@
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Data)))
 		i += copy(dAtA[i:], m.Data)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1247,16 +1884,16 @@
 	_ = l
 	dAtA[i] = 0xa
 	i++
-	i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.StartedAt)))
-	n7, err := types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)))
+	n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n7
 	dAtA[i] = 0x12
 	i++
-	i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt)))
-	n8, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -1283,6 +1920,9 @@
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Expected)))
 		i += copy(dAtA[i:], m.Expected)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1307,6 +1947,9 @@
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
 		i += copy(dAtA[i:], m.Ref)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1335,6 +1978,9 @@
 		}
 		i += n9
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1368,6 +2014,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1398,6 +2047,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1466,6 +2118,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1491,16 +2146,16 @@
 	}
 	dAtA[i] = 0x12
 	i++
-	i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.StartedAt)))
-	n10, err := types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)))
+	n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n10
 	dAtA[i] = 0x1a
 	i++
-	i = encodeVarintContent(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt)))
-	n11, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -1521,6 +2176,9 @@
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
 		i += copy(dAtA[i:], m.Digest)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1545,6 +2203,9 @@
 		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
 		i += copy(dAtA[i:], m.Ref)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1558,6 +2219,9 @@
 	return offset + 1
 }
 func (m *Info) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Digest)
@@ -1567,9 +2231,9 @@
 	if m.Size_ != 0 {
 		n += 1 + sovContent(uint64(m.Size_))
 	}
-	l = types.SizeOfStdTime(m.CreatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
 	n += 1 + l + sovContent(uint64(l))
-	l = types.SizeOfStdTime(m.UpdatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
 	n += 1 + l + sovContent(uint64(l))
 	if len(m.Labels) > 0 {
 		for k, v := range m.Labels {
@@ -1579,28 +2243,46 @@
 			n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *InfoRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Digest)
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *InfoResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Info.Size()
 	n += 1 + l + sovContent(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Info.Size()
@@ -1609,18 +2291,30 @@
 		l = m.UpdateMask.Size()
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Info.Size()
 	n += 1 + l + sovContent(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListContentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Filters) > 0 {
@@ -1629,10 +2323,16 @@
 			n += 1 + l + sovContent(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListContentResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Info) > 0 {
@@ -1641,20 +2341,32 @@
 			n += 1 + l + sovContent(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteContentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Digest)
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ReadContentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Digest)
@@ -1667,10 +2379,16 @@
 	if m.Size_ != 0 {
 		n += 1 + sovContent(uint64(m.Size_))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ReadContentResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Offset != 0 {
@@ -1680,15 +2398,21 @@
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *Status) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
-	l = types.SizeOfStdTime(m.StartedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)
 	n += 1 + l + sovContent(uint64(l))
-	l = types.SizeOfStdTime(m.UpdatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
 	n += 1 + l + sovContent(uint64(l))
 	l = len(m.Ref)
 	if l > 0 {
@@ -1704,30 +2428,48 @@
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StatusRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Ref)
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StatusResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Status != nil {
 		l = m.Status.Size()
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListStatusesRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Filters) > 0 {
@@ -1736,10 +2478,16 @@
 			n += 1 + l + sovContent(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListStatusesResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Statuses) > 0 {
@@ -1748,10 +2496,16 @@
 			n += 1 + l + sovContent(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *WriteContentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Action != 0 {
@@ -1783,18 +2537,24 @@
 			n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *WriteContentResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Action != 0 {
 		n += 1 + sovContent(uint64(m.Action))
 	}
-	l = types.SizeOfStdTime(m.StartedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)
 	n += 1 + l + sovContent(uint64(l))
-	l = types.SizeOfStdTime(m.UpdatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
 	n += 1 + l + sovContent(uint64(l))
 	if m.Offset != 0 {
 		n += 1 + sovContent(uint64(m.Offset))
@@ -1806,16 +2566,25 @@
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *AbortRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Ref)
 	if l > 0 {
 		n += 1 + l + sovContent(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -1840,7 +2609,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -1849,9 +2618,10 @@
 	s := strings.Join([]string{`&Info{`,
 		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
 		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
-		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
-		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1862,6 +2632,7 @@
 	}
 	s := strings.Join([]string{`&InfoRequest{`,
 		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1872,6 +2643,7 @@
 	}
 	s := strings.Join([]string{`&InfoResponse{`,
 		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1882,7 +2654,8 @@
 	}
 	s := strings.Join([]string{`&UpdateRequest{`,
 		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
-		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf1.FieldMask", 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1893,6 +2666,7 @@
 	}
 	s := strings.Join([]string{`&UpdateResponse{`,
 		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1903,6 +2677,7 @@
 	}
 	s := strings.Join([]string{`&ListContentRequest{`,
 		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1913,6 +2688,7 @@
 	}
 	s := strings.Join([]string{`&ListContentResponse{`,
 		`Info:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Info), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1923,6 +2699,7 @@
 	}
 	s := strings.Join([]string{`&DeleteContentRequest{`,
 		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1935,6 +2712,7 @@
 		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
 		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
 		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1946,6 +2724,7 @@
 	s := strings.Join([]string{`&ReadContentResponse{`,
 		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
 		`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1955,12 +2734,13 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&Status{`,
-		`StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
-		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
 		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
 		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
 		`Expected:` + fmt.Sprintf("%v", this.Expected) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1971,6 +2751,7 @@
 	}
 	s := strings.Join([]string{`&StatusRequest{`,
 		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1981,6 +2762,7 @@
 	}
 	s := strings.Join([]string{`&StatusResponse{`,
 		`Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "Status", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1991,6 +2773,7 @@
 	}
 	s := strings.Join([]string{`&ListStatusesRequest{`,
 		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2001,6 +2784,7 @@
 	}
 	s := strings.Join([]string{`&ListStatusesResponse{`,
 		`Statuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Statuses), "Status", "Status", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2013,7 +2797,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -2027,6 +2811,7 @@
 		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
 		`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2037,11 +2822,12 @@
 	}
 	s := strings.Join([]string{`&WriteContentResponse{`,
 		`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
-		`StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
-		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
 		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
 		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2052,6 +2838,7 @@
 	}
 	s := strings.Join([]string{`&AbortRequest{`,
 		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2079,7 +2866,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2107,7 +2894,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2117,6 +2904,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2136,7 +2926,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Size_ |= (int64(b) & 0x7F) << shift
+				m.Size_ |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2155,7 +2945,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2164,10 +2954,13 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -2185,7 +2978,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2194,10 +2987,13 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -2215,7 +3011,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2224,6 +3020,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2244,7 +3043,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -2261,7 +3060,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -2271,6 +3070,9 @@
 						return ErrInvalidLengthContent
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthContent
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -2287,7 +3089,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -2297,6 +3099,9 @@
 						return ErrInvalidLengthContent
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthContent
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -2328,9 +3133,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2355,7 +3164,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2383,7 +3192,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2393,6 +3202,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2407,9 +3219,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2434,7 +3250,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2462,7 +3278,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2471,6 +3287,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2487,9 +3306,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2514,7 +3337,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2542,7 +3365,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2551,6 +3374,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2572,7 +3398,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2581,11 +3407,14 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.UpdateMask == nil {
-				m.UpdateMask = &google_protobuf1.FieldMask{}
+				m.UpdateMask = &types.FieldMask{}
 			}
 			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -2600,9 +3429,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2627,7 +3460,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2655,7 +3488,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2664,6 +3497,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2680,9 +3516,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2707,7 +3547,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2735,7 +3575,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2745,6 +3585,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2759,9 +3602,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2786,7 +3633,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2814,7 +3661,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2823,6 +3670,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2840,9 +3690,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2867,7 +3721,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2895,7 +3749,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2905,6 +3759,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2919,9 +3776,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2946,7 +3807,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2974,7 +3835,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2984,6 +3845,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3003,7 +3867,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Offset |= (int64(b) & 0x7F) << shift
+				m.Offset |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3022,7 +3886,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Size_ |= (int64(b) & 0x7F) << shift
+				m.Size_ |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3036,9 +3900,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3063,7 +3931,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3091,7 +3959,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Offset |= (int64(b) & 0x7F) << shift
+				m.Offset |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3110,7 +3978,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3119,6 +3987,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3136,9 +4007,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3163,7 +4038,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3191,7 +4066,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3200,10 +4075,13 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -3221,7 +4099,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3230,10 +4108,13 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -3251,7 +4132,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3261,6 +4142,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3280,7 +4164,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Offset |= (int64(b) & 0x7F) << shift
+				m.Offset |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3299,7 +4183,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Total |= (int64(b) & 0x7F) << shift
+				m.Total |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3318,7 +4202,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3328,6 +4212,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3342,9 +4229,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3369,7 +4260,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3397,7 +4288,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3407,6 +4298,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3421,9 +4315,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3448,7 +4346,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3476,7 +4374,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3485,6 +4383,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3504,9 +4405,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3531,7 +4436,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3559,7 +4464,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3569,6 +4474,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3583,9 +4491,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3610,7 +4522,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3638,7 +4550,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3647,6 +4559,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3664,9 +4579,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3691,7 +4610,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3719,7 +4638,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Action |= (WriteAction(b) & 0x7F) << shift
+				m.Action |= WriteAction(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3738,7 +4657,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3748,6 +4667,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3767,7 +4689,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Total |= (int64(b) & 0x7F) << shift
+				m.Total |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3786,7 +4708,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3796,6 +4718,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3815,7 +4740,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Offset |= (int64(b) & 0x7F) << shift
+				m.Offset |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3834,7 +4759,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3843,6 +4768,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3865,7 +4793,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3874,6 +4802,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3894,7 +4825,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -3911,7 +4842,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -3921,6 +4852,9 @@
 						return ErrInvalidLengthContent
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthContent
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -3937,7 +4871,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -3947,6 +4881,9 @@
 						return ErrInvalidLengthContent
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthContent
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -3978,9 +4915,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4005,7 +4946,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4033,7 +4974,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Action |= (WriteAction(b) & 0x7F) << shift
+				m.Action |= WriteAction(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4052,7 +4993,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4061,10 +5002,13 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -4082,7 +5026,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4091,10 +5035,13 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -4112,7 +5059,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Offset |= (int64(b) & 0x7F) << shift
+				m.Offset |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4131,7 +5078,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Total |= (int64(b) & 0x7F) << shift
+				m.Total |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4150,7 +5097,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4160,6 +5107,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4174,9 +5124,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4201,7 +5155,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4229,7 +5183,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4239,6 +5193,9 @@
 				return ErrInvalidLengthContent
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthContent
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4253,9 +5210,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthContent
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthContent
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4319,10 +5280,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthContent
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthContent
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -4351,6 +5315,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthContent
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -4369,79 +5336,3 @@
 	ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowContent   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/content/v1/content.proto", fileDescriptorContent)
-}
-
-var fileDescriptorContent = []byte{
-	// 1081 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45,
-	0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42,
-	0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b,
-	0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17,
-	0x24, 0x14, 0xf5, 0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68,
-	0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99,
-	0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d,
-	0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d,
-	0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5,
-	0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58,
-	0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14,
-	0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26,
-	0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc,
-	0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9,
-	0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e,
-	0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09,
-	0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4,
-	0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69,
-	0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35,
-	0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3,
-	0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf,
-	0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7,
-	0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9,
-	0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 0x07, 0xf1, 0xf7, 0x90, 0xf1, 0x25, 0xa4,
-	0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3,
-	0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59,
-	0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58,
-	0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b,
-	0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c,
-	0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3,
-	0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f,
-	0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59,
-	0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50,
-	0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a,
-	0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc,
-	0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6,
-	0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57,
-	0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26,
-	0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1,
-	0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90,
-	0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2,
-	0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20,
-	0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1,
-	0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70,
-	0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f,
-	0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb,
-	0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34,
-	0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 0xbe, 0x94, 0xac, 0x8c, 0x0e,
-	0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9,
-	0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28,
-	0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16,
-	0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39,
-	0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7,
-	0xb5, 0x37, 0x4e, 0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3,
-	0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c,
-	0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d,
-	0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8,
-	0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b,
-	0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb,
-	0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 0x26, 0xdf, 0x19,
-	0xd1, 0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3,
-	0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4,
-	0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c,
-	0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42,
-	0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7,
-	0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd,
-	0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff,
-	0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
index 6eba311..9ada873 100644
--- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
@@ -1,36 +1,20 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/diff/v1/diff.proto
 
-/*
-	Package diff is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/diff/v1/diff.proto
-
-	It has these top-level messages:
-		ApplyRequest
-		ApplyResponse
-		DiffRequest
-		DiffResponse
-*/
 package diff
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import containerd_types "github.com/containerd/containerd/api/types"
-import containerd_types1 "github.com/containerd/containerd/api/types"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	types "github.com/containerd/containerd/api/types"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -45,32 +29,94 @@
 
 type ApplyRequest struct {
 	// Diff is the descriptor of the diff to be extracted
-	Diff   *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=diff" json:"diff,omitempty"`
-	Mounts []*containerd_types.Mount     `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"`
+	Diff                 *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"`
+	Mounts               []*types.Mount    `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *ApplyRequest) Reset()                    { *m = ApplyRequest{} }
-func (*ApplyRequest) ProtoMessage()               {}
-func (*ApplyRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{0} }
+func (m *ApplyRequest) Reset()      { *m = ApplyRequest{} }
+func (*ApplyRequest) ProtoMessage() {}
+func (*ApplyRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3b36a99e6faaa935, []int{0}
+}
+func (m *ApplyRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ApplyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ApplyRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ApplyRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyRequest.Merge(m, src)
+}
+func (m *ApplyRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ApplyRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ApplyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplyRequest proto.InternalMessageInfo
 
 type ApplyResponse struct {
 	// Applied is the descriptor for the object which was applied.
 	// If the input was a compressed blob then the result will be
 	// the descriptor for the uncompressed blob.
-	Applied *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=applied" json:"applied,omitempty"`
+	Applied              *types.Descriptor `protobuf:"bytes,1,opt,name=applied,proto3" json:"applied,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *ApplyResponse) Reset()                    { *m = ApplyResponse{} }
-func (*ApplyResponse) ProtoMessage()               {}
-func (*ApplyResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{1} }
+func (m *ApplyResponse) Reset()      { *m = ApplyResponse{} }
+func (*ApplyResponse) ProtoMessage() {}
+func (*ApplyResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3b36a99e6faaa935, []int{1}
+}
+func (m *ApplyResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ApplyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ApplyResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ApplyResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyResponse.Merge(m, src)
+}
+func (m *ApplyResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ApplyResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ApplyResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplyResponse proto.InternalMessageInfo
 
 type DiffRequest struct {
 	// Left are the mounts which represent the older copy,
 	// which is the base of the computed changes.
-	Left []*containerd_types.Mount `protobuf:"bytes,1,rep,name=left" json:"left,omitempty"`
+	Left []*types.Mount `protobuf:"bytes,1,rep,name=left,proto3" json:"left,omitempty"`
 	// Right are the mounts which represent the newer copy,
 	// into which the changes from the left were made.
-	Right []*containerd_types.Mount `protobuf:"bytes,2,rep,name=right" json:"right,omitempty"`
+	Right []*types.Mount `protobuf:"bytes,2,rep,name=right,proto3" json:"right,omitempty"`
 	// MediaType is the media type descriptor for the created diff
 	// object
 	MediaType string `protobuf:"bytes,3,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
@@ -79,29 +125,129 @@
 	Ref string `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"`
 	// Labels are the labels to apply to the generated content
 	// on content store commit.
-	Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels               map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *DiffRequest) Reset()                    { *m = DiffRequest{} }
-func (*DiffRequest) ProtoMessage()               {}
-func (*DiffRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{2} }
+func (m *DiffRequest) Reset()      { *m = DiffRequest{} }
+func (*DiffRequest) ProtoMessage() {}
+func (*DiffRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3b36a99e6faaa935, []int{2}
+}
+func (m *DiffRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DiffRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DiffRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DiffRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DiffRequest.Merge(m, src)
+}
+func (m *DiffRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DiffRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DiffRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DiffRequest proto.InternalMessageInfo
 
 type DiffResponse struct {
 	// Diff is the descriptor of the diff which can be applied
-	Diff *containerd_types1.Descriptor `protobuf:"bytes,3,opt,name=diff" json:"diff,omitempty"`
+	Diff                 *types.Descriptor `protobuf:"bytes,3,opt,name=diff,proto3" json:"diff,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *DiffResponse) Reset()                    { *m = DiffResponse{} }
-func (*DiffResponse) ProtoMessage()               {}
-func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{3} }
+func (m *DiffResponse) Reset()      { *m = DiffResponse{} }
+func (*DiffResponse) ProtoMessage() {}
+func (*DiffResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3b36a99e6faaa935, []int{3}
+}
+func (m *DiffResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DiffResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DiffResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DiffResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DiffResponse.Merge(m, src)
+}
+func (m *DiffResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DiffResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DiffResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DiffResponse proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
 	proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
 	proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry")
 	proto.RegisterType((*DiffResponse)(nil), "containerd.services.diff.v1.DiffResponse")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptor_3b36a99e6faaa935)
+}
+
+var fileDescriptor_3b36a99e6faaa935 = []byte{
+	// 457 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
+	0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
+	0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a,
+	0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47,
+	0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef,
+	0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea,
+	0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1,
+	0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63,
+	0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35,
+	0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa,
+	0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab,
+	0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6,
+	0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f,
+	0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb,
+	0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b,
+	0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d,
+	0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2,
+	0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb,
+	0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd,
+	0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b,
+	0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f,
+	0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb,
+	0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77,
+	0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b,
+	0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac,
+	0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f,
+	0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1,
+	0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff,
+	0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -110,8 +256,9 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Diff service
-
+// DiffClient is the client API for Diff service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type DiffClient interface {
 	// Apply applies the content associated with the provided digests onto
 	// the provided mounts. Archive content will be extracted and
@@ -132,7 +279,7 @@
 
 func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
 	out := new(ApplyResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
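
In the regenerated stubs the unary calls go through the Invoke method on the client's *grpc.ClientConn rather than the package-level grpc.Invoke helper, as the hunk above shows for Apply. The following is a minimal caller-side sketch of driving the regenerated Diff client; the connection, the bind-mount paths, and the function name are assumptions for illustration and are not part of the vendored change.

package diffexample

import (
	"context"
	"fmt"

	diffapi "github.com/containerd/containerd/api/services/diff/v1"
	"github.com/containerd/containerd/api/types"
	"google.golang.org/grpc"
)

// diffMounts asks the diff service to compute the changes between two
// snapshots exposed as bind mounts. Obtaining conn (normally a connection
// to containerd's gRPC socket) is out of scope for this sketch.
func diffMounts(ctx context.Context, conn *grpc.ClientConn) error {
	client := diffapi.NewDiffClient(conn)

	// The generated Diff method builds the request and hands it to
	// conn.Invoke, exactly as in the regenerated code above.
	resp, err := client.Diff(ctx, &diffapi.DiffRequest{
		Left:  []*types.Mount{{Type: "bind", Source: "/tmp/lower", Options: []string{"rbind", "ro"}}},
		Right: []*types.Mount{{Type: "bind", Source: "/tmp/upper", Options: []string{"rbind", "ro"}}},
	})
	if err != nil {
		return err
	}
	if resp.Diff != nil {
		fmt.Printf("diff written as %s (%s)\n", resp.Diff.MediaType, resp.Diff.Digest)
	}
	return nil
}
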
@@ -141,15 +288,14 @@
 
 func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
 	out := new(DiffResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Diff service
-
+// DiffServer is the server API for Diff service.
 type DiffServer interface {
 	// Apply applies the content associated with the provided digests onto
 	// the provided mounts. Archive content will be extracted and
@@ -254,6 +400,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -282,6 +431,9 @@
 		}
 		i += n2
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -353,6 +505,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -381,6 +536,9 @@
 		}
 		i += n3
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -394,6 +552,9 @@
 	return offset + 1
 }
 func (m *ApplyRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Diff != nil {
@@ -406,20 +567,32 @@
 			n += 1 + l + sovDiff(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ApplyResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Applied != nil {
 		l = m.Applied.Size()
 		n += 1 + l + sovDiff(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DiffRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Left) > 0 {
@@ -450,16 +623,25 @@
 			n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DiffResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Diff != nil {
 		l = m.Diff.Size()
 		n += 1 + l + sovDiff(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -481,8 +663,9 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&ApplyRequest{`,
-		`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
-		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
+		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -492,7 +675,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&ApplyResponse{`,
-		`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
+		`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "types.Descriptor", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -505,18 +689,19 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
 	}
 	mapStringForLabels += "}"
 	s := strings.Join([]string{`&DiffRequest{`,
-		`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "containerd_types.Mount", 1) + `,`,
-		`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "types.Mount", 1) + `,`,
+		`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "types.Mount", 1) + `,`,
 		`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
 		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -526,7 +711,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&DiffResponse{`,
-		`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
+		`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -554,7 +740,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -582,7 +768,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -591,11 +777,14 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Diff == nil {
-				m.Diff = &containerd_types1.Descriptor{}
+				m.Diff = &types.Descriptor{}
 			}
 			if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -615,7 +804,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -624,10 +813,13 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Mounts = append(m.Mounts, &containerd_types.Mount{})
+			m.Mounts = append(m.Mounts, &types.Mount{})
 			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -641,9 +833,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthDiff
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -668,7 +864,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -696,7 +892,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -705,11 +901,14 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Applied == nil {
-				m.Applied = &containerd_types1.Descriptor{}
+				m.Applied = &types.Descriptor{}
 			}
 			if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -724,9 +923,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthDiff
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -751,7 +954,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -779,7 +982,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -788,10 +991,13 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Left = append(m.Left, &containerd_types.Mount{})
+			m.Left = append(m.Left, &types.Mount{})
 			if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -810,7 +1016,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -819,10 +1025,13 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Right = append(m.Right, &containerd_types.Mount{})
+			m.Right = append(m.Right, &types.Mount{})
 			if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -841,7 +1050,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -851,6 +1060,9 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -870,7 +1082,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -880,6 +1092,9 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -899,7 +1114,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -908,6 +1123,9 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -928,7 +1146,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -945,7 +1163,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -955,6 +1173,9 @@
 						return ErrInvalidLengthDiff
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthDiff
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -971,7 +1192,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -981,6 +1202,9 @@
 						return ErrInvalidLengthDiff
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthDiff
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1012,9 +1236,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthDiff
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1039,7 +1267,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1067,7 +1295,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1076,11 +1304,14 @@
 				return ErrInvalidLengthDiff
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Diff == nil {
-				m.Diff = &containerd_types1.Descriptor{}
+				m.Diff = &types.Descriptor{}
 			}
 			if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -1095,9 +1326,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthDiff
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthDiff
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1161,10 +1396,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthDiff
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthDiff
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1193,6 +1431,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthDiff
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1211,40 +1452,3 @@
 	ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowDiff   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptorDiff)
-}
-
-var fileDescriptorDiff = []byte{
-	// 457 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
-	0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
-	0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a,
-	0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47,
-	0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef,
-	0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea,
-	0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1,
-	0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63,
-	0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35,
-	0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa,
-	0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab,
-	0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6,
-	0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f,
-	0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb,
-	0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b,
-	0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d,
-	0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2,
-	0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb,
-	0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd,
-	0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b,
-	0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f,
-	0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb,
-	0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77,
-	0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b,
-	0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac,
-	0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f,
-	0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1,
-	0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff,
-	0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
index d6a7b38..dcaebbf 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
@@ -1,43 +1,22 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/events/v1/events.proto
 
-/*
-	Package events is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/events/v1/events.proto
-
-	It has these top-level messages:
-		PublishRequest
-		ForwardRequest
-		SubscribeRequest
-		Envelope
-*/
 package events
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import google_protobuf2 "github.com/gogo/protobuf/types"
-import _ "github.com/gogo/protobuf/types"
-
-import time "time"
-
-import typeurl "github.com/containerd/typeurl"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	github_com_containerd_typeurl "github.com/containerd/typeurl"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types "github.com/gogo/protobuf/types"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -52,40 +31,164 @@
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type PublishRequest struct {
-	Topic string                `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
-	Event *google_protobuf1.Any `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
+	Topic                string     `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
+	Event                *types.Any `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
 }
 
-func (m *PublishRequest) Reset()                    { *m = PublishRequest{} }
-func (*PublishRequest) ProtoMessage()               {}
-func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{0} }
+func (m *PublishRequest) Reset()      { *m = PublishRequest{} }
+func (*PublishRequest) ProtoMessage() {}
+func (*PublishRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_43fcd20dc1642376, []int{0}
+}
+func (m *PublishRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PublishRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PublishRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PublishRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PublishRequest.Merge(m, src)
+}
+func (m *PublishRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PublishRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PublishRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PublishRequest proto.InternalMessageInfo
 
 type ForwardRequest struct {
-	Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope" json:"envelope,omitempty"`
+	Envelope             *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *ForwardRequest) Reset()                    { *m = ForwardRequest{} }
-func (*ForwardRequest) ProtoMessage()               {}
-func (*ForwardRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{1} }
+func (m *ForwardRequest) Reset()      { *m = ForwardRequest{} }
+func (*ForwardRequest) ProtoMessage() {}
+func (*ForwardRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_43fcd20dc1642376, []int{1}
+}
+func (m *ForwardRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ForwardRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ForwardRequest.Merge(m, src)
+}
+func (m *ForwardRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ForwardRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ForwardRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ForwardRequest proto.InternalMessageInfo
 
 type SubscribeRequest struct {
-	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+	Filters              []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *SubscribeRequest) Reset()                    { *m = SubscribeRequest{} }
-func (*SubscribeRequest) ProtoMessage()               {}
-func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{2} }
+func (m *SubscribeRequest) Reset()      { *m = SubscribeRequest{} }
+func (*SubscribeRequest) ProtoMessage() {}
+func (*SubscribeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_43fcd20dc1642376, []int{2}
+}
+func (m *SubscribeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SubscribeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SubscribeRequest.Merge(m, src)
+}
+func (m *SubscribeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *SubscribeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SubscribeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubscribeRequest proto.InternalMessageInfo
 
 type Envelope struct {
-	Timestamp time.Time             `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
-	Namespace string                `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
-	Topic     string                `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
-	Event     *google_protobuf1.Any `protobuf:"bytes,4,opt,name=event" json:"event,omitempty"`
+	Timestamp            time.Time  `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
+	Namespace            string     `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
+	Topic                string     `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
+	Event                *types.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
 }
 
-func (m *Envelope) Reset()                    { *m = Envelope{} }
-func (*Envelope) ProtoMessage()               {}
-func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{3} }
+func (m *Envelope) Reset()      { *m = Envelope{} }
+func (*Envelope) ProtoMessage() {}
+func (*Envelope) Descriptor() ([]byte, []int) {
+	return fileDescriptor_43fcd20dc1642376, []int{3}
+}
+func (m *Envelope) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Envelope.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Envelope) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Envelope.Merge(m, src)
+}
+func (m *Envelope) XXX_Size() int {
+	return m.Size()
+}
+func (m *Envelope) XXX_DiscardUnknown() {
+	xxx_messageInfo_Envelope.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Envelope proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest")
@@ -94,6 +197,44 @@
 	proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptor_43fcd20dc1642376)
+}
+
+var fileDescriptor_43fcd20dc1642376 = []byte{
+	// 466 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
+	0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
+	0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb,
+	0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0,
+	0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf,
+	0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
+	0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
+	0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
+	0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2,
+	0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
+	0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
+	0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
+	0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee,
+	0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
+	0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
+	0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8,
+	0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96,
+	0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
+	0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42,
+	0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14,
+	0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58,
+	0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5,
+	0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d,
+	0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee,
+	0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b,
+	0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97,
+	0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b,
+	0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0,
+	0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04,
+	0x00, 0x00,
+}
+
 // Field returns the value for the given fieldpath as a string, if defined.
 // If the value is not defined, the second value will be false.
 func (m *Envelope) Field(fieldpath []string) (string, bool) {
@@ -108,7 +249,7 @@
 	case "topic":
 		return string(m.Topic), len(m.Topic) > 0
 	case "event":
-		decoded, err := typeurl.UnmarshalAny(m.Event)
+		decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event)
 		if err != nil {
 			return "", false
 		}
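
The Field accessor in the hunk above uses typeurl.UnmarshalAny to turn the envelope's Any payload back into a concrete event. Below is a hedged sketch of the same pattern on the consumer side; the TaskExit case and the helper name are illustrative assumptions, not something this diff adds.

package eventsexample

import (
	"fmt"

	apievents "github.com/containerd/containerd/api/events"
	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	"github.com/containerd/typeurl"
)

// describe decodes the Any payload carried by an envelope and renders a
// short human-readable summary of the event.
func describe(env *eventsapi.Envelope) (string, error) {
	decoded, err := typeurl.UnmarshalAny(env.Event)
	if err != nil {
		return "", err
	}
	switch e := decoded.(type) {
	case *apievents.TaskExit:
		// Assumption: a task-exit event arrived; other event types land in default.
		return fmt.Sprintf("task in %s exited with status %d", e.ContainerID, e.ExitStatus), nil
	default:
		return fmt.Sprintf("%s carried a %T", env.Topic, decoded), nil
	}
}
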
@@ -130,20 +271,21 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Events service
-
+// EventsClient is the client API for Events service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type EventsClient interface {
 	// Publish an event to a topic.
 	//
 	// The event will be packed into a timestamp envelope with the namespace
 	// introspected from the context. The envelope will then be dispatched.
-	Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error)
 	// Forward sends an event that has already been packaged into an envelope
 	// with a timestamp and namespace.
 	//
 	// This is useful if earlier timestamping is required or when forwarding on
 	// behalf of another component, namespace or publisher.
-	Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error)
 	// Subscribe to a stream of events, possibly returning only those that
 	// match any of the provided filters.
 	//
@@ -162,18 +304,18 @@
 	return &eventsClient{cc}
 }
 
-func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
-	out := new(google_protobuf2.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, c.cc, opts...)
+func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error) {
+	out := new(types.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
-	out := new(google_protobuf2.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, c.cc, opts...)
+func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error) {
+	out := new(types.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -181,7 +323,7 @@
 }
 
 func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Events_serviceDesc.Streams[0], c.cc, "/containerd.services.events.v1.Events/Subscribe", opts...)
+	stream, err := c.cc.NewStream(ctx, &_Events_serviceDesc.Streams[0], "/containerd.services.events.v1.Events/Subscribe", opts...)
 	if err != nil {
 		return nil, err
 	}
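
Subscribe is the only streaming RPC touched here; the regenerated client now opens the stream through NewStream on the ClientConn instead of the package-level grpc.NewClientStream. A minimal consumer-side sketch, assuming an already-dialed *grpc.ClientConn and an illustrative topic filter (none of this is part of the vendored change):

package eventsexample

import (
	"context"
	"fmt"

	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	"google.golang.org/grpc"
)

// watchTasks subscribes to task-related events and prints each envelope as
// it arrives. Recv blocks until the server sends the next envelope or the
// stream terminates.
func watchTasks(ctx context.Context, conn *grpc.ClientConn) error {
	client := eventsapi.NewEventsClient(conn)

	stream, err := client.Subscribe(ctx, &eventsapi.SubscribeRequest{
		Filters: []string{`topic~="/tasks/.+"`}, // assumed filter expression
	})
	if err != nil {
		return err
	}
	for {
		env, err := stream.Recv()
		if err != nil {
			return err // io.EOF or a gRPC status once the stream ends
		}
		fmt.Printf("%s %s %s\n", env.Timestamp.Format("15:04:05.000"), env.Namespace, env.Topic)
	}
}
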
@@ -212,20 +354,19 @@
 	return m, nil
 }
 
-// Server API for Events service
-
+// EventsServer is the server API for Events service.
 type EventsServer interface {
 	// Publish an event to a topic.
 	//
 	// The event will be packed into a timestamp envelope with the namespace
 	// introspected from the context. The envelope will then be dispatched.
-	Publish(context.Context, *PublishRequest) (*google_protobuf2.Empty, error)
+	Publish(context.Context, *PublishRequest) (*types.Empty, error)
 	// Forward sends an event that has already been packaged into an envelope
 	// with a timestamp and namespace.
 	//
 	// This is useful if earlier timestamping is required or when forwarding on
 	// behalf of another component, namespace or publisher.
-	Forward(context.Context, *ForwardRequest) (*google_protobuf2.Empty, error)
+	Forward(context.Context, *ForwardRequest) (*types.Empty, error)
 	// Subscribe to a stream of events, possibly returning only those that
 	// match any of the provided filters.
 	//
@@ -351,6 +492,9 @@
 		}
 		i += n1
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -379,6 +523,9 @@
 		}
 		i += n2
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -412,6 +559,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -432,8 +582,8 @@
 	_ = l
 	dAtA[i] = 0xa
 	i++
-	i = encodeVarintEvents(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp)))
-	n3, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+	i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -460,6 +610,9 @@
 		}
 		i += n4
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -473,6 +626,9 @@
 	return offset + 1
 }
 func (m *PublishRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Topic)
@@ -483,20 +639,32 @@
 		l = m.Event.Size()
 		n += 1 + l + sovEvents(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ForwardRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Envelope != nil {
 		l = m.Envelope.Size()
 		n += 1 + l + sovEvents(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *SubscribeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Filters) > 0 {
@@ -505,13 +673,19 @@
 			n += 1 + l + sovEvents(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *Envelope) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
-	l = types.SizeOfStdTime(m.Timestamp)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
 	n += 1 + l + sovEvents(uint64(l))
 	l = len(m.Namespace)
 	if l > 0 {
@@ -525,6 +699,9 @@
 		l = m.Event.Size()
 		n += 1 + l + sovEvents(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -547,7 +724,8 @@
 	}
 	s := strings.Join([]string{`&PublishRequest{`,
 		`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
-		`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -558,6 +736,7 @@
 	}
 	s := strings.Join([]string{`&ForwardRequest{`,
 		`Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -568,6 +747,7 @@
 	}
 	s := strings.Join([]string{`&SubscribeRequest{`,
 		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -577,10 +757,11 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&Envelope{`,
-		`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
 		`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
-		`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -608,7 +789,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -636,7 +817,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -646,6 +827,9 @@
 				return ErrInvalidLengthEvents
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -665,7 +849,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -674,11 +858,14 @@
 				return ErrInvalidLengthEvents
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Event == nil {
-				m.Event = &google_protobuf1.Any{}
+				m.Event = &types.Any{}
 			}
 			if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -693,9 +880,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthEvents
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -720,7 +911,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -748,7 +939,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -757,6 +948,9 @@
 				return ErrInvalidLengthEvents
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -776,9 +970,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthEvents
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -803,7 +1001,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -831,7 +1029,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -841,6 +1039,9 @@
 				return ErrInvalidLengthEvents
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -855,9 +1056,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthEvents
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -882,7 +1087,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -910,7 +1115,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -919,10 +1124,13 @@
 				return ErrInvalidLengthEvents
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -940,7 +1148,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -950,6 +1158,9 @@
 				return ErrInvalidLengthEvents
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -969,7 +1180,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -979,6 +1190,9 @@
 				return ErrInvalidLengthEvents
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -998,7 +1212,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1007,11 +1221,14 @@
 				return ErrInvalidLengthEvents
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Event == nil {
-				m.Event = &google_protobuf1.Any{}
+				m.Event = &types.Any{}
 			}
 			if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -1026,9 +1243,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthEvents
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEvents
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1092,10 +1313,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthEvents
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthEvents
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1124,6 +1348,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthEvents
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1142,41 +1369,3 @@
 	ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowEvents   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptorEvents)
-}
-
-var fileDescriptorEvents = []byte{
-	// 466 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
-	0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
-	0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb,
-	0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0,
-	0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf,
-	0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
-	0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
-	0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
-	0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2,
-	0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
-	0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
-	0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
-	0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee,
-	0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
-	0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
-	0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8,
-	0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96,
-	0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
-	0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42,
-	0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14,
-	0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58,
-	0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5,
-	0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d,
-	0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee,
-	0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b,
-	0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97,
-	0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b,
-	0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0,
-	0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04,
-	0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
index 0809074..eaa1d17 100644
--- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
@@ -1,48 +1,23 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/images/v1/images.proto
 
-/*
-	Package images is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/images/v1/images.proto
-
-	It has these top-level messages:
-		Image
-		GetImageRequest
-		GetImageResponse
-		CreateImageRequest
-		CreateImageResponse
-		UpdateImageRequest
-		UpdateImageResponse
-		ListImagesRequest
-		ListImagesResponse
-		DeleteImageRequest
-*/
 package images
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import google_protobuf2 "github.com/gogo/protobuf/types"
-import _ "github.com/gogo/protobuf/types"
-import containerd_types "github.com/containerd/containerd/api/types"
-
-import time "time"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	types "github.com/containerd/containerd/api/types"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types1 "github.com/gogo/protobuf/types"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -66,72 +41,289 @@
 	//
 	// Labels may be updated using the field mask.
 	// The combined size of a key/value pair cannot exceed 4096 bytes.
-	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	// Target describes the content entry point of the image.
-	Target containerd_types.Descriptor `protobuf:"bytes,3,opt,name=target" json:"target"`
+	Target types.Descriptor `protobuf:"bytes,3,opt,name=target,proto3" json:"target"`
 	// CreatedAt is the time the image was first created.
-	CreatedAt time.Time `protobuf:"bytes,7,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	CreatedAt time.Time `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"`
 	// UpdatedAt is the last time the image was mutated.
-	UpdatedAt time.Time `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	UpdatedAt            time.Time `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *Image) Reset()                    { *m = Image{} }
-func (*Image) ProtoMessage()               {}
-func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{0} }
+func (m *Image) Reset()      { *m = Image{} }
+func (*Image) ProtoMessage() {}
+func (*Image) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{0}
+}
+func (m *Image) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Image.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Image) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Image.Merge(m, src)
+}
+func (m *Image) XXX_Size() int {
+	return m.Size()
+}
+func (m *Image) XXX_DiscardUnknown() {
+	xxx_messageInfo_Image.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Image proto.InternalMessageInfo
 
 type GetImageRequest struct {
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *GetImageRequest) Reset()                    { *m = GetImageRequest{} }
-func (*GetImageRequest) ProtoMessage()               {}
-func (*GetImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{1} }
+func (m *GetImageRequest) Reset()      { *m = GetImageRequest{} }
+func (*GetImageRequest) ProtoMessage() {}
+func (*GetImageRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{1}
+}
+func (m *GetImageRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_GetImageRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *GetImageRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetImageRequest.Merge(m, src)
+}
+func (m *GetImageRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetImageRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetImageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetImageRequest proto.InternalMessageInfo
 
 type GetImageResponse struct {
-	Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"`
+	Image                *Image   `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *GetImageResponse) Reset()                    { *m = GetImageResponse{} }
-func (*GetImageResponse) ProtoMessage()               {}
-func (*GetImageResponse) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{2} }
+func (m *GetImageResponse) Reset()      { *m = GetImageResponse{} }
+func (*GetImageResponse) ProtoMessage() {}
+func (*GetImageResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{2}
+}
+func (m *GetImageResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_GetImageResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *GetImageResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetImageResponse.Merge(m, src)
+}
+func (m *GetImageResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetImageResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetImageResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetImageResponse proto.InternalMessageInfo
 
 type CreateImageRequest struct {
-	Image Image `protobuf:"bytes,1,opt,name=image" json:"image"`
+	Image                Image    `protobuf:"bytes,1,opt,name=image,proto3" json:"image"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CreateImageRequest) Reset()                    { *m = CreateImageRequest{} }
-func (*CreateImageRequest) ProtoMessage()               {}
-func (*CreateImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{3} }
+func (m *CreateImageRequest) Reset()      { *m = CreateImageRequest{} }
+func (*CreateImageRequest) ProtoMessage() {}
+func (*CreateImageRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{3}
+}
+func (m *CreateImageRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateImageRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateImageRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateImageRequest.Merge(m, src)
+}
+func (m *CreateImageRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateImageRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateImageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateImageRequest proto.InternalMessageInfo
 
 type CreateImageResponse struct {
-	Image Image `protobuf:"bytes,1,opt,name=image" json:"image"`
+	Image                Image    `protobuf:"bytes,1,opt,name=image,proto3" json:"image"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CreateImageResponse) Reset()                    { *m = CreateImageResponse{} }
-func (*CreateImageResponse) ProtoMessage()               {}
-func (*CreateImageResponse) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{4} }
+func (m *CreateImageResponse) Reset()      { *m = CreateImageResponse{} }
+func (*CreateImageResponse) ProtoMessage() {}
+func (*CreateImageResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{4}
+}
+func (m *CreateImageResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateImageResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateImageResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateImageResponse.Merge(m, src)
+}
+func (m *CreateImageResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateImageResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateImageResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateImageResponse proto.InternalMessageInfo
 
 type UpdateImageRequest struct {
 	// Image provides a full or partial image for update.
 	//
 	// The name field must be set or an error will be returned.
-	Image Image `protobuf:"bytes,1,opt,name=image" json:"image"`
+	Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"`
 	// UpdateMask specifies which fields to perform the update on. If empty,
 	// the operation applies to all fields.
-	UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+	UpdateMask           *types1.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *UpdateImageRequest) Reset()                    { *m = UpdateImageRequest{} }
-func (*UpdateImageRequest) ProtoMessage()               {}
-func (*UpdateImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{5} }
+func (m *UpdateImageRequest) Reset()      { *m = UpdateImageRequest{} }
+func (*UpdateImageRequest) ProtoMessage() {}
+func (*UpdateImageRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{5}
+}
+func (m *UpdateImageRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateImageRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateImageRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateImageRequest.Merge(m, src)
+}
+func (m *UpdateImageRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateImageRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateImageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateImageRequest proto.InternalMessageInfo
 
 type UpdateImageResponse struct {
-	Image Image `protobuf:"bytes,1,opt,name=image" json:"image"`
+	Image                Image    `protobuf:"bytes,1,opt,name=image,proto3" json:"image"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *UpdateImageResponse) Reset()                    { *m = UpdateImageResponse{} }
-func (*UpdateImageResponse) ProtoMessage()               {}
-func (*UpdateImageResponse) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{6} }
+func (m *UpdateImageResponse) Reset()      { *m = UpdateImageResponse{} }
+func (*UpdateImageResponse) ProtoMessage() {}
+func (*UpdateImageResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{6}
+}
+func (m *UpdateImageResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateImageResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateImageResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateImageResponse.Merge(m, src)
+}
+func (m *UpdateImageResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateImageResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateImageResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateImageResponse proto.InternalMessageInfo
 
 type ListImagesRequest struct {
 	// Filters contains one or more filters using the syntax defined in the
@@ -144,20 +336,82 @@
 	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
 	//
 	// If filters is zero-length or nil, all items will be returned.
-	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+	Filters              []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListImagesRequest) Reset()                    { *m = ListImagesRequest{} }
-func (*ListImagesRequest) ProtoMessage()               {}
-func (*ListImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{7} }
+func (m *ListImagesRequest) Reset()      { *m = ListImagesRequest{} }
+func (*ListImagesRequest) ProtoMessage() {}
+func (*ListImagesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{7}
+}
+func (m *ListImagesRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListImagesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListImagesRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListImagesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListImagesRequest.Merge(m, src)
+}
+func (m *ListImagesRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListImagesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListImagesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListImagesRequest proto.InternalMessageInfo
 
 type ListImagesResponse struct {
-	Images []Image `protobuf:"bytes,1,rep,name=images" json:"images"`
+	Images               []Image  `protobuf:"bytes,1,rep,name=images,proto3" json:"images"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListImagesResponse) Reset()                    { *m = ListImagesResponse{} }
-func (*ListImagesResponse) ProtoMessage()               {}
-func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{8} }
+func (m *ListImagesResponse) Reset()      { *m = ListImagesResponse{} }
+func (*ListImagesResponse) ProtoMessage() {}
+func (*ListImagesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{8}
+}
+func (m *ListImagesResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListImagesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListImagesResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListImagesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListImagesResponse.Merge(m, src)
+}
+func (m *ListImagesResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListImagesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListImagesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListImagesResponse proto.InternalMessageInfo
 
 type DeleteImageRequest struct {
 	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
@@ -165,15 +419,47 @@
 	// synchronously before returning to the caller
 	//
 	// Default is false
-	Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"`
+	Sync                 bool     `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *DeleteImageRequest) Reset()                    { *m = DeleteImageRequest{} }
-func (*DeleteImageRequest) ProtoMessage()               {}
-func (*DeleteImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{9} }
+func (m *DeleteImageRequest) Reset()      { *m = DeleteImageRequest{} }
+func (*DeleteImageRequest) ProtoMessage() {}
+func (*DeleteImageRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8666fa071128ae5f, []int{9}
+}
+func (m *DeleteImageRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteImageRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteImageRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteImageRequest.Merge(m, src)
+}
+func (m *DeleteImageRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteImageRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteImageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteImageRequest proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Image)(nil), "containerd.services.images.v1.Image")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.Image.LabelsEntry")
 	proto.RegisterType((*GetImageRequest)(nil), "containerd.services.images.v1.GetImageRequest")
 	proto.RegisterType((*GetImageResponse)(nil), "containerd.services.images.v1.GetImageResponse")
 	proto.RegisterType((*CreateImageRequest)(nil), "containerd.services.images.v1.CreateImageRequest")
@@ -185,6 +471,56 @@
 	proto.RegisterType((*DeleteImageRequest)(nil), "containerd.services.images.v1.DeleteImageRequest")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/images/v1/images.proto", fileDescriptor_8666fa071128ae5f)
+}
+
+var fileDescriptor_8666fa071128ae5f = []byte{
+	// 659 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40,
+	0x10, 0x8e, 0x93, 0xd4, 0x6d, 0x27, 0x07, 0xca, 0x52, 0x21, 0xcb, 0x40, 0x1a, 0x45, 0x20, 0xe5,
+	0xc2, 0x9a, 0x86, 0x0b, 0xb4, 0x08, 0xd1, 0xb4, 0xa5, 0x20, 0x15, 0x0e, 0xe6, 0xaf, 0xe2, 0x52,
+	0x6d, 0x92, 0x89, 0xb1, 0x62, 0xc7, 0xc6, 0xbb, 0x89, 0x94, 0x1b, 0x8f, 0x80, 0x04, 0x0f, 0xd5,
+	0x23, 0x47, 0x4e, 0x40, 0x73, 0xe0, 0x39, 0x90, 0x77, 0x37, 0x34, 0x4d, 0x22, 0x92, 0x94, 0xde,
+	0x66, 0xed, 0xef, 0x9b, 0x9f, 0x6f, 0x66, 0x76, 0x61, 0xcf, 0xf3, 0xc5, 0x87, 0x6e, 0x9d, 0x36,
+	0xa2, 0xd0, 0x69, 0x44, 0x1d, 0xc1, 0xfc, 0x0e, 0x26, 0xcd, 0x51, 0x93, 0xc5, 0xbe, 0xc3, 0x31,
+	0xe9, 0xf9, 0x0d, 0xe4, 0x8e, 0x1f, 0x32, 0x0f, 0xb9, 0xd3, 0xdb, 0xd4, 0x16, 0x8d, 0x93, 0x48,
+	0x44, 0xe4, 0xd6, 0x19, 0x9e, 0x0e, 0xb1, 0x54, 0x23, 0x7a, 0x9b, 0xf6, 0xba, 0x17, 0x79, 0x91,
+	0x44, 0x3a, 0xa9, 0xa5, 0x48, 0xf6, 0x0d, 0x2f, 0x8a, 0xbc, 0x00, 0x1d, 0x79, 0xaa, 0x77, 0x5b,
+	0x0e, 0x86, 0xb1, 0xe8, 0xeb, 0x9f, 0xa5, 0xf1, 0x9f, 0x2d, 0x1f, 0x83, 0xe6, 0x71, 0xc8, 0x78,
+	0x5b, 0x23, 0x36, 0xc6, 0x11, 0xc2, 0x0f, 0x91, 0x0b, 0x16, 0xc6, 0x1a, 0xb0, 0x3d, 0x57, 0x69,
+	0xa2, 0x1f, 0x23, 0x77, 0x9a, 0xc8, 0x1b, 0x89, 0x1f, 0x8b, 0x28, 0x51, 0xe4, 0xf2, 0xef, 0x2c,
+	0x2c, 0x3d, 0x4f, 0x0b, 0x20, 0x04, 0xf2, 0x1d, 0x16, 0xa2, 0x65, 0x94, 0x8c, 0xca, 0xaa, 0x2b,
+	0x6d, 0xf2, 0x0c, 0xcc, 0x80, 0xd5, 0x31, 0xe0, 0x56, 0xb6, 0x94, 0xab, 0x14, 0xaa, 0xf7, 0xe8,
+	0x3f, 0x05, 0xa0, 0xd2, 0x13, 0x3d, 0x94, 0x94, 0xfd, 0x8e, 0x48, 0xfa, 0xae, 0xe6, 0x93, 0x2d,
+	0x30, 0x05, 0x4b, 0x3c, 0x14, 0x56, 0xae, 0x64, 0x54, 0x0a, 0xd5, 0x9b, 0xa3, 0x9e, 0x64, 0x6e,
+	0x74, 0xef, 0x6f, 0x6e, 0xb5, 0xfc, 0xc9, 0x8f, 0x8d, 0x8c, 0xab, 0x19, 0x64, 0x17, 0xa0, 0x91,
+	0x20, 0x13, 0xd8, 0x3c, 0x66, 0xc2, 0x5a, 0x96, 0x7c, 0x9b, 0x2a, 0x59, 0xe8, 0x50, 0x16, 0xfa,
+	0x7a, 0x28, 0x4b, 0x6d, 0x25, 0x65, 0x7f, 0xfe, 0xb9, 0x61, 0xb8, 0xab, 0x9a, 0xb7, 0x23, 0x9d,
+	0x74, 0xe3, 0xe6, 0xd0, 0xc9, 0xca, 0x22, 0x4e, 0x34, 0x6f, 0x47, 0xd8, 0x0f, 0xa1, 0x30, 0x52,
+	0x1c, 0x59, 0x83, 0x5c, 0x1b, 0xfb, 0x5a, 0xb1, 0xd4, 0x24, 0xeb, 0xb0, 0xd4, 0x63, 0x41, 0x17,
+	0xad, 0xac, 0xfc, 0xa6, 0x0e, 0x5b, 0xd9, 0x07, 0x46, 0xf9, 0x0e, 0x5c, 0x39, 0x40, 0x21, 0x05,
+	0x72, 0xf1, 0x63, 0x17, 0xb9, 0x98, 0xa6, 0x78, 0xf9, 0x25, 0xac, 0x9d, 0xc1, 0x78, 0x1c, 0x75,
+	0x38, 0x92, 0x2d, 0x58, 0x92, 0x12, 0x4b, 0x60, 0xa1, 0x7a, 0x7b, 0x9e, 0x26, 0xb8, 0x8a, 0x52,
+	0x7e, 0x0b, 0x64, 0x57, 0x6a, 0x70, 0x2e, 0xf2, 0x93, 0x0b, 0x78, 0xd4, 0x4d, 0xd1, 0x7e, 0xdf,
+	0xc1, 0xb5, 0x73, 0x7e, 0x75, 0xaa, 0xff, 0xef, 0xf8, 0x8b, 0x01, 0xe4, 0x8d, 0x14, 0xfc, 0x72,
+	0x33, 0x26, 0xdb, 0x50, 0x50, 0x8d, 0x94, 0xcb, 0x25, 0x1b, 0x34, 0x6d, 0x02, 0x9e, 0xa6, 0xfb,
+	0xf7, 0x82, 0xf1, 0xb6, 0xab, 0xe7, 0x25, 0xb5, 0xd3, 0x72, 0xcf, 0x25, 0x75, 0x69, 0xe5, 0xde,
+	0x85, 0xab, 0x87, 0x3e, 0x57, 0x0d, 0xe7, 0xc3, 0x62, 0x2d, 0x58, 0x6e, 0xf9, 0x81, 0xc0, 0x84,
+	0x5b, 0x46, 0x29, 0x57, 0x59, 0x75, 0x87, 0xc7, 0xf2, 0x11, 0x90, 0x51, 0xb8, 0x4e, 0xa3, 0x06,
+	0xa6, 0x0a, 0x22, 0xe1, 0x8b, 0xe5, 0xa1, 0x99, 0xe5, 0x47, 0x40, 0xf6, 0x30, 0xc0, 0x31, 0xd9,
+	0xa7, 0x5d, 0x0a, 0x04, 0xf2, 0xbc, 0xdf, 0x69, 0x48, 0x05, 0x57, 0x5c, 0x69, 0x57, 0xbf, 0xe6,
+	0xc1, 0x54, 0x49, 0x91, 0x16, 0xe4, 0x0e, 0x50, 0x10, 0x3a, 0x23, 0x87, 0xb1, 0x65, 0xb0, 0x9d,
+	0xb9, 0xf1, 0xba, 0xe8, 0x36, 0xe4, 0x53, 0x29, 0xc8, 0xac, 0x3b, 0x69, 0x42, 0x5e, 0x7b, 0x73,
+	0x01, 0x86, 0x0e, 0x16, 0x81, 0xa9, 0xc6, 0x9d, 0xcc, 0x22, 0x4f, 0x6e, 0x9b, 0x5d, 0x5d, 0x84,
+	0x72, 0x16, 0x50, 0x0d, 0xdc, 0xcc, 0x80, 0x93, 0xcb, 0x32, 0x33, 0xe0, 0xb4, 0x51, 0x7e, 0x05,
+	0xa6, 0xea, 0xff, 0xcc, 0x80, 0x93, 0x63, 0x62, 0x5f, 0x9f, 0x58, 0xa3, 0xfd, 0xf4, 0x8d, 0xab,
+	0x1d, 0x9d, 0x9c, 0x16, 0x33, 0xdf, 0x4f, 0x8b, 0x99, 0x4f, 0x83, 0xa2, 0x71, 0x32, 0x28, 0x1a,
+	0xdf, 0x06, 0x45, 0xe3, 0xd7, 0xa0, 0x68, 0xbc, 0x7f, 0x7c, 0xc1, 0xf7, 0x78, 0x5b, 0x59, 0x47,
+	0x99, 0xba, 0x29, 0x63, 0xdd, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x24, 0x4e, 0xca, 0x64, 0xda,
+	0x07, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -193,8 +529,9 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Images service
-
+// ImagesClient is the client API for Images service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type ImagesClient interface {
 	// Get returns an image by name.
 	Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error)
@@ -208,7 +545,7 @@
 	// image.
 	Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error)
 	// Delete deletes the image by name.
-	Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*types1.Empty, error)
 }
 
 type imagesClient struct {
@@ -221,7 +558,7 @@
 
 func (c *imagesClient) Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) {
 	out := new(GetImageResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/Get", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Get", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -230,7 +567,7 @@
 
 func (c *imagesClient) List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) {
 	out := new(ListImagesResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/List", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/List", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -239,7 +576,7 @@
 
 func (c *imagesClient) Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) {
 	out := new(CreateImageResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/Create", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Create", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -248,24 +585,23 @@
 
 func (c *imagesClient) Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) {
 	out := new(UpdateImageResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/Update", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Update", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *imagesClient) Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/Delete", in, out, c.cc, opts...)
+func (c *imagesClient) Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Delete", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Images service
-
+// ImagesServer is the server API for Images service.
 type ImagesServer interface {
 	// Get returns an image by name.
 	Get(context.Context, *GetImageRequest) (*GetImageResponse, error)
@@ -279,7 +615,7 @@
 	// image.
 	Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error)
 	// Delete deletes the image by name.
-	Delete(context.Context, *DeleteImageRequest) (*google_protobuf1.Empty, error)
+	Delete(context.Context, *DeleteImageRequest) (*types1.Empty, error)
 }
 
 func RegisterImagesServer(s *grpc.Server, srv ImagesServer) {
@@ -453,20 +789,23 @@
 	i += n1
 	dAtA[i] = 0x3a
 	i++
-	i = encodeVarintImages(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt)))
-	n2, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	i = encodeVarintImages(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n2
 	dAtA[i] = 0x42
 	i++
-	i = encodeVarintImages(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt)))
-	n3, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	i = encodeVarintImages(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n3
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -491,6 +830,9 @@
 		i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
 		i += copy(dAtA[i:], m.Name)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -519,6 +861,9 @@
 		}
 		i += n4
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -545,6 +890,9 @@
 		return 0, err
 	}
 	i += n5
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -571,6 +919,9 @@
 		return 0, err
 	}
 	i += n6
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -607,6 +958,9 @@
 		}
 		i += n8
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -633,6 +987,9 @@
 		return 0, err
 	}
 	i += n9
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -666,6 +1023,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -696,6 +1056,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -730,6 +1093,9 @@
 		}
 		i++
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -743,6 +1109,9 @@
 	return offset + 1
 }
 func (m *Image) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -759,50 +1128,80 @@
 	}
 	l = m.Target.Size()
 	n += 1 + l + sovImages(uint64(l))
-	l = types.SizeOfStdTime(m.CreatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
 	n += 1 + l + sovImages(uint64(l))
-	l = types.SizeOfStdTime(m.UpdatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
 	n += 1 + l + sovImages(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *GetImageRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
 	if l > 0 {
 		n += 1 + l + sovImages(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *GetImageResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Image != nil {
 		l = m.Image.Size()
 		n += 1 + l + sovImages(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateImageRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Image.Size()
 	n += 1 + l + sovImages(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateImageResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Image.Size()
 	n += 1 + l + sovImages(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateImageRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Image.Size()
@@ -811,18 +1210,30 @@
 		l = m.UpdateMask.Size()
 		n += 1 + l + sovImages(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateImageResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Image.Size()
 	n += 1 + l + sovImages(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListImagesRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Filters) > 0 {
@@ -831,10 +1242,16 @@
 			n += 1 + l + sovImages(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListImagesResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Images) > 0 {
@@ -843,10 +1260,16 @@
 			n += 1 + l + sovImages(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteImageRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -856,6 +1279,9 @@
 	if m.Sync {
 		n += 2
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -880,7 +1306,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -889,9 +1315,10 @@
 	s := strings.Join([]string{`&Image{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
-		`Target:` + strings.Replace(strings.Replace(this.Target.String(), "Descriptor", "containerd_types.Descriptor", 1), `&`, ``, 1) + `,`,
-		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
-		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Target:` + strings.Replace(strings.Replace(this.Target.String(), "Descriptor", "types.Descriptor", 1), `&`, ``, 1) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -902,6 +1329,7 @@
 	}
 	s := strings.Join([]string{`&GetImageRequest{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -912,6 +1340,7 @@
 	}
 	s := strings.Join([]string{`&GetImageResponse{`,
 		`Image:` + strings.Replace(fmt.Sprintf("%v", this.Image), "Image", "Image", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -922,6 +1351,7 @@
 	}
 	s := strings.Join([]string{`&CreateImageRequest{`,
 		`Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -932,6 +1362,7 @@
 	}
 	s := strings.Join([]string{`&CreateImageResponse{`,
 		`Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -942,7 +1373,8 @@
 	}
 	s := strings.Join([]string{`&UpdateImageRequest{`,
 		`Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
-		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf2.FieldMask", 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types1.FieldMask", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -953,6 +1385,7 @@
 	}
 	s := strings.Join([]string{`&UpdateImageResponse{`,
 		`Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -963,6 +1396,7 @@
 	}
 	s := strings.Join([]string{`&ListImagesRequest{`,
 		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -973,6 +1407,7 @@
 	}
 	s := strings.Join([]string{`&ListImagesResponse{`,
 		`Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -984,6 +1419,7 @@
 	s := strings.Join([]string{`&DeleteImageRequest{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Sync:` + fmt.Sprintf("%v", this.Sync) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1011,7 +1447,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1039,7 +1475,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1049,6 +1485,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1068,7 +1507,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1077,6 +1516,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1097,7 +1539,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1114,7 +1556,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1124,6 +1566,9 @@
 						return ErrInvalidLengthImages
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthImages
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1140,7 +1585,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1150,6 +1595,9 @@
 						return ErrInvalidLengthImages
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthImages
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1186,7 +1634,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1195,6 +1643,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1216,7 +1667,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1225,10 +1676,13 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -1246,7 +1700,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1255,10 +1709,13 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -1271,9 +1728,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1298,7 +1759,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1326,7 +1787,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1336,6 +1797,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1350,9 +1814,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1377,7 +1845,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1405,7 +1873,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1414,6 +1882,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1433,9 +1904,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1460,7 +1935,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1488,7 +1963,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1497,6 +1972,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1513,9 +1991,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1540,7 +2022,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1568,7 +2050,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1577,6 +2059,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1593,9 +2078,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1620,7 +2109,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1648,7 +2137,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1657,6 +2146,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1678,7 +2170,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1687,11 +2179,14 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.UpdateMask == nil {
-				m.UpdateMask = &google_protobuf2.FieldMask{}
+				m.UpdateMask = &types1.FieldMask{}
 			}
 			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -1706,9 +2201,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1733,7 +2232,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1761,7 +2260,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1770,6 +2269,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1786,9 +2288,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1813,7 +2319,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1841,7 +2347,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1851,6 +2357,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1865,9 +2374,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1892,7 +2405,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1920,7 +2433,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1929,6 +2442,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1946,9 +2462,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1973,7 +2493,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2001,7 +2521,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2011,6 +2531,9 @@
 				return ErrInvalidLengthImages
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthImages
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2030,7 +2553,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2045,9 +2568,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthImages
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthImages
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2111,10 +2638,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthImages
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthImages
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -2143,6 +2673,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthImages
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -2161,53 +2694,3 @@
 	ErrInvalidLengthImages = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowImages   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/images/v1/images.proto", fileDescriptorImages)
-}
-
-var fileDescriptorImages = []byte{
-	// 659 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40,
-	0x10, 0x8e, 0x93, 0xd4, 0x6d, 0x27, 0x07, 0xca, 0x52, 0x21, 0xcb, 0x40, 0x1a, 0x45, 0x20, 0xe5,
-	0xc2, 0x9a, 0x86, 0x0b, 0xb4, 0x08, 0xd1, 0xb4, 0xa5, 0x20, 0x15, 0x0e, 0xe6, 0xaf, 0xe2, 0x52,
-	0x6d, 0x92, 0x89, 0xb1, 0x62, 0xc7, 0xc6, 0xbb, 0x89, 0x94, 0x1b, 0x8f, 0x80, 0x04, 0x0f, 0xd5,
-	0x23, 0x47, 0x4e, 0x40, 0x73, 0xe0, 0x39, 0x90, 0x77, 0x37, 0x34, 0x4d, 0x22, 0x92, 0x94, 0xde,
-	0x66, 0xed, 0xef, 0x9b, 0x9f, 0x6f, 0x66, 0x76, 0x61, 0xcf, 0xf3, 0xc5, 0x87, 0x6e, 0x9d, 0x36,
-	0xa2, 0xd0, 0x69, 0x44, 0x1d, 0xc1, 0xfc, 0x0e, 0x26, 0xcd, 0x51, 0x93, 0xc5, 0xbe, 0xc3, 0x31,
-	0xe9, 0xf9, 0x0d, 0xe4, 0x8e, 0x1f, 0x32, 0x0f, 0xb9, 0xd3, 0xdb, 0xd4, 0x16, 0x8d, 0x93, 0x48,
-	0x44, 0xe4, 0xd6, 0x19, 0x9e, 0x0e, 0xb1, 0x54, 0x23, 0x7a, 0x9b, 0xf6, 0xba, 0x17, 0x79, 0x91,
-	0x44, 0x3a, 0xa9, 0xa5, 0x48, 0xf6, 0x0d, 0x2f, 0x8a, 0xbc, 0x00, 0x1d, 0x79, 0xaa, 0x77, 0x5b,
-	0x0e, 0x86, 0xb1, 0xe8, 0xeb, 0x9f, 0xa5, 0xf1, 0x9f, 0x2d, 0x1f, 0x83, 0xe6, 0x71, 0xc8, 0x78,
-	0x5b, 0x23, 0x36, 0xc6, 0x11, 0xc2, 0x0f, 0x91, 0x0b, 0x16, 0xc6, 0x1a, 0xb0, 0x3d, 0x57, 0x69,
-	0xa2, 0x1f, 0x23, 0x77, 0x9a, 0xc8, 0x1b, 0x89, 0x1f, 0x8b, 0x28, 0x51, 0xe4, 0xf2, 0xef, 0x2c,
-	0x2c, 0x3d, 0x4f, 0x0b, 0x20, 0x04, 0xf2, 0x1d, 0x16, 0xa2, 0x65, 0x94, 0x8c, 0xca, 0xaa, 0x2b,
-	0x6d, 0xf2, 0x0c, 0xcc, 0x80, 0xd5, 0x31, 0xe0, 0x56, 0xb6, 0x94, 0xab, 0x14, 0xaa, 0xf7, 0xe8,
-	0x3f, 0x05, 0xa0, 0xd2, 0x13, 0x3d, 0x94, 0x94, 0xfd, 0x8e, 0x48, 0xfa, 0xae, 0xe6, 0x93, 0x2d,
-	0x30, 0x05, 0x4b, 0x3c, 0x14, 0x56, 0xae, 0x64, 0x54, 0x0a, 0xd5, 0x9b, 0xa3, 0x9e, 0x64, 0x6e,
-	0x74, 0xef, 0x6f, 0x6e, 0xb5, 0xfc, 0xc9, 0x8f, 0x8d, 0x8c, 0xab, 0x19, 0x64, 0x17, 0xa0, 0x91,
-	0x20, 0x13, 0xd8, 0x3c, 0x66, 0xc2, 0x5a, 0x96, 0x7c, 0x9b, 0x2a, 0x59, 0xe8, 0x50, 0x16, 0xfa,
-	0x7a, 0x28, 0x4b, 0x6d, 0x25, 0x65, 0x7f, 0xfe, 0xb9, 0x61, 0xb8, 0xab, 0x9a, 0xb7, 0x23, 0x9d,
-	0x74, 0xe3, 0xe6, 0xd0, 0xc9, 0xca, 0x22, 0x4e, 0x34, 0x6f, 0x47, 0xd8, 0x0f, 0xa1, 0x30, 0x52,
-	0x1c, 0x59, 0x83, 0x5c, 0x1b, 0xfb, 0x5a, 0xb1, 0xd4, 0x24, 0xeb, 0xb0, 0xd4, 0x63, 0x41, 0x17,
-	0xad, 0xac, 0xfc, 0xa6, 0x0e, 0x5b, 0xd9, 0x07, 0x46, 0xf9, 0x0e, 0x5c, 0x39, 0x40, 0x21, 0x05,
-	0x72, 0xf1, 0x63, 0x17, 0xb9, 0x98, 0xa6, 0x78, 0xf9, 0x25, 0xac, 0x9d, 0xc1, 0x78, 0x1c, 0x75,
-	0x38, 0x92, 0x2d, 0x58, 0x92, 0x12, 0x4b, 0x60, 0xa1, 0x7a, 0x7b, 0x9e, 0x26, 0xb8, 0x8a, 0x52,
-	0x7e, 0x0b, 0x64, 0x57, 0x6a, 0x70, 0x2e, 0xf2, 0x93, 0x0b, 0x78, 0xd4, 0x4d, 0xd1, 0x7e, 0xdf,
-	0xc1, 0xb5, 0x73, 0x7e, 0x75, 0xaa, 0xff, 0xef, 0xf8, 0x8b, 0x01, 0xe4, 0x8d, 0x14, 0xfc, 0x72,
-	0x33, 0x26, 0xdb, 0x50, 0x50, 0x8d, 0x94, 0xcb, 0x25, 0x1b, 0x34, 0x6d, 0x02, 0x9e, 0xa6, 0xfb,
-	0xf7, 0x82, 0xf1, 0xb6, 0xab, 0xe7, 0x25, 0xb5, 0xd3, 0x72, 0xcf, 0x25, 0x75, 0x69, 0xe5, 0xde,
-	0x85, 0xab, 0x87, 0x3e, 0x57, 0x0d, 0xe7, 0xc3, 0x62, 0x2d, 0x58, 0x6e, 0xf9, 0x81, 0xc0, 0x84,
-	0x5b, 0x46, 0x29, 0x57, 0x59, 0x75, 0x87, 0xc7, 0xf2, 0x11, 0x90, 0x51, 0xb8, 0x4e, 0xa3, 0x06,
-	0xa6, 0x0a, 0x22, 0xe1, 0x8b, 0xe5, 0xa1, 0x99, 0xe5, 0x47, 0x40, 0xf6, 0x30, 0xc0, 0x31, 0xd9,
-	0xa7, 0x5d, 0x0a, 0x04, 0xf2, 0xbc, 0xdf, 0x69, 0x48, 0x05, 0x57, 0x5c, 0x69, 0x57, 0xbf, 0xe6,
-	0xc1, 0x54, 0x49, 0x91, 0x16, 0xe4, 0x0e, 0x50, 0x10, 0x3a, 0x23, 0x87, 0xb1, 0x65, 0xb0, 0x9d,
-	0xb9, 0xf1, 0xba, 0xe8, 0x36, 0xe4, 0x53, 0x29, 0xc8, 0xac, 0x3b, 0x69, 0x42, 0x5e, 0x7b, 0x73,
-	0x01, 0x86, 0x0e, 0x16, 0x81, 0xa9, 0xc6, 0x9d, 0xcc, 0x22, 0x4f, 0x6e, 0x9b, 0x5d, 0x5d, 0x84,
-	0x72, 0x16, 0x50, 0x0d, 0xdc, 0xcc, 0x80, 0x93, 0xcb, 0x32, 0x33, 0xe0, 0xb4, 0x51, 0x7e, 0x05,
-	0xa6, 0xea, 0xff, 0xcc, 0x80, 0x93, 0x63, 0x62, 0x5f, 0x9f, 0x58, 0xa3, 0xfd, 0xf4, 0x8d, 0xab,
-	0x1d, 0x9d, 0x9c, 0x16, 0x33, 0xdf, 0x4f, 0x8b, 0x99, 0x4f, 0x83, 0xa2, 0x71, 0x32, 0x28, 0x1a,
-	0xdf, 0x06, 0x45, 0xe3, 0xd7, 0xa0, 0x68, 0xbc, 0x7f, 0x7c, 0xc1, 0xf7, 0x78, 0x5b, 0x59, 0x47,
-	0x99, 0xba, 0x29, 0x63, 0xdd, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x24, 0x4e, 0xca, 0x64, 0xda,
-	0x07, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
index 02bac62..016ced4 100644
--- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
@@ -1,35 +1,21 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
 
-/*
-	Package introspection is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
-
-	It has these top-level messages:
-		Plugin
-		PluginsRequest
-		PluginsResponse
-*/
 package introspection
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import containerd_types "github.com/containerd/containerd/api/types"
-import google_rpc "github.com/gogo/googleapis/google/rpc"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	types "github.com/containerd/containerd/api/types"
+	rpc "github.com/gogo/googleapis/google/rpc"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -51,7 +37,7 @@
 	// ID identifies the plugin uniquely in the system.
 	ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
 	// Requires lists the plugin types required by this plugin.
-	Requires []string `protobuf:"bytes,3,rep,name=requires" json:"requires,omitempty"`
+	Requires []string `protobuf:"bytes,3,rep,name=requires,proto3" json:"requires,omitempty"`
 	// Platforms enumerates the platforms this plugin will support.
 	//
 	// If values are provided here, the plugin will only be operable under the
@@ -61,30 +47,61 @@
 	//
 	// If the plugin prefers certain platforms over others, they should be
 	// listed from most to least preferred.
-	Platforms []containerd_types.Platform `protobuf:"bytes,4,rep,name=platforms" json:"platforms"`
+	Platforms []types.Platform `protobuf:"bytes,4,rep,name=platforms,proto3" json:"platforms"`
 	// Exports allows plugins to provide values about state or configuration to
 	// interested parties.
 	//
 	// One example is exposing the configured path of a snapshotter plugin.
-	Exports map[string]string `protobuf:"bytes,5,rep,name=exports" json:"exports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Exports map[string]string `protobuf:"bytes,5,rep,name=exports,proto3" json:"exports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	// Capabilities allows plugins to communicate feature switches to allow
 	// clients to detect features that may not be on be default or may be
 	// different from version to version.
 	//
 	// Use this sparingly.
-	Capabilities []string `protobuf:"bytes,6,rep,name=capabilities" json:"capabilities,omitempty"`
+	Capabilities []string `protobuf:"bytes,6,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
 	// InitErr will be set if the plugin fails initialization.
 	//
 	// This means the plugin may have been registered but a non-terminal error
 	// was encountered during initialization.
 	//
 	// Plugins that have this value set cannot be used.
-	InitErr *google_rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr" json:"init_err,omitempty"`
+	InitErr              *rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr,proto3" json:"init_err,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
-func (m *Plugin) Reset()                    { *m = Plugin{} }
-func (*Plugin) ProtoMessage()               {}
-func (*Plugin) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{0} }
+func (m *Plugin) Reset()      { *m = Plugin{} }
+func (*Plugin) ProtoMessage() {}
+func (*Plugin) Descriptor() ([]byte, []int) {
+	return fileDescriptor_1a14fda866f10715, []int{0}
+}
+func (m *Plugin) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Plugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Plugin.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Plugin) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Plugin.Merge(m, src)
+}
+func (m *Plugin) XXX_Size() int {
+	return m.Size()
+}
+func (m *Plugin) XXX_DiscardUnknown() {
+	xxx_messageInfo_Plugin.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Plugin proto.InternalMessageInfo
 
 type PluginsRequest struct {
 	// Filters contains one or more filters using the syntax defined in the
@@ -97,27 +114,129 @@
 	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
 	//
 	// If filters is zero-length or nil, all items will be returned.
-	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+	Filters              []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *PluginsRequest) Reset()                    { *m = PluginsRequest{} }
-func (*PluginsRequest) ProtoMessage()               {}
-func (*PluginsRequest) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{1} }
+func (m *PluginsRequest) Reset()      { *m = PluginsRequest{} }
+func (*PluginsRequest) ProtoMessage() {}
+func (*PluginsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_1a14fda866f10715, []int{1}
+}
+func (m *PluginsRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PluginsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PluginsRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PluginsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PluginsRequest.Merge(m, src)
+}
+func (m *PluginsRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PluginsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PluginsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginsRequest proto.InternalMessageInfo
 
 type PluginsResponse struct {
-	Plugins []Plugin `protobuf:"bytes,1,rep,name=plugins" json:"plugins"`
+	Plugins              []Plugin `protobuf:"bytes,1,rep,name=plugins,proto3" json:"plugins"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *PluginsResponse) Reset()                    { *m = PluginsResponse{} }
-func (*PluginsResponse) ProtoMessage()               {}
-func (*PluginsResponse) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{2} }
+func (m *PluginsResponse) Reset()      { *m = PluginsResponse{} }
+func (*PluginsResponse) ProtoMessage() {}
+func (*PluginsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_1a14fda866f10715, []int{2}
+}
+func (m *PluginsResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PluginsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PluginsResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PluginsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PluginsResponse.Merge(m, src)
+}
+func (m *PluginsResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *PluginsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_PluginsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry")
 	proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest")
 	proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", fileDescriptor_1a14fda866f10715)
+}
+
+var fileDescriptor_1a14fda866f10715 = []byte{
+	// 487 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
+	0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21,
+	0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10,
+	0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb,
+	0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b,
+	0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda,
+	0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22,
+	0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08,
+	0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5,
+	0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c,
+	0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e,
+	0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a,
+	0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89,
+	0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d,
+	0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49,
+	0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93,
+	0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67,
+	0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32,
+	0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b,
+	0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea,
+	0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5,
+	0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87,
+	0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e,
+	0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e,
+	0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c,
+	0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82,
+	0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe,
+	0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78,
+	0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc,
+	0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6,
+	0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -126,8 +245,9 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Introspection service
-
+// IntrospectionClient is the client API for Introspection service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type IntrospectionClient interface {
 	// Plugins returns a list of plugins in containerd.
 	//
@@ -146,15 +266,14 @@
 
 func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) {
 	out := new(PluginsResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Introspection service
-
+// IntrospectionServer is the server API for Introspection service.
 type IntrospectionServer interface {
 	// Plugins returns a list of plugins in containerd.
 	//
@@ -294,6 +413,9 @@
 		}
 		i += n1
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -327,6 +449,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -357,6 +482,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -370,6 +498,9 @@
 	return offset + 1
 }
 func (m *Plugin) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Type)
@@ -410,10 +541,16 @@
 		l = m.InitErr.Size()
 		n += 1 + l + sovIntrospection(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *PluginsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Filters) > 0 {
@@ -422,10 +559,16 @@
 			n += 1 + l + sovIntrospection(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *PluginsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Plugins) > 0 {
@@ -434,6 +577,9 @@
 			n += 1 + l + sovIntrospection(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -458,7 +604,7 @@
 	for k, _ := range this.Exports {
 		keysForExports = append(keysForExports, k)
 	}
-	sortkeys.Strings(keysForExports)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForExports)
 	mapStringForExports := "map[string]string{"
 	for _, k := range keysForExports {
 		mapStringForExports += fmt.Sprintf("%v: %v,", k, this.Exports[k])
@@ -468,10 +614,11 @@
 		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Requires:` + fmt.Sprintf("%v", this.Requires) + `,`,
-		`Platforms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "containerd_types.Platform", 1), `&`, ``, 1) + `,`,
+		`Platforms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "types.Platform", 1), `&`, ``, 1) + `,`,
 		`Exports:` + mapStringForExports + `,`,
 		`Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`,
-		`InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "google_rpc.Status", 1) + `,`,
+		`InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "rpc.Status", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -482,6 +629,7 @@
 	}
 	s := strings.Join([]string{`&PluginsRequest{`,
 		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -492,6 +640,7 @@
 	}
 	s := strings.Join([]string{`&PluginsResponse{`,
 		`Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "Plugin", "Plugin", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -519,7 +668,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -547,7 +696,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -557,6 +706,9 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -576,7 +728,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -586,6 +738,9 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -605,7 +760,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -615,6 +770,9 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -634,7 +792,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -643,10 +801,13 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Platforms = append(m.Platforms, containerd_types.Platform{})
+			m.Platforms = append(m.Platforms, types.Platform{})
 			if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -665,7 +826,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -674,6 +835,9 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -694,7 +858,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -711,7 +875,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -721,6 +885,9 @@
 						return ErrInvalidLengthIntrospection
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthIntrospection
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -737,7 +904,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -747,6 +914,9 @@
 						return ErrInvalidLengthIntrospection
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthIntrospection
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -783,7 +953,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -793,6 +963,9 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -812,7 +985,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -821,11 +994,14 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.InitErr == nil {
-				m.InitErr = &google_rpc.Status{}
+				m.InitErr = &rpc.Status{}
 			}
 			if err := m.InitErr.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -840,9 +1016,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthIntrospection
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -867,7 +1047,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -895,7 +1075,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -905,6 +1085,9 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -919,9 +1102,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthIntrospection
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -946,7 +1133,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -974,7 +1161,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -983,6 +1170,9 @@
 				return ErrInvalidLengthIntrospection
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1000,9 +1190,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthIntrospection
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthIntrospection
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1066,10 +1260,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthIntrospection
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthIntrospection
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1098,6 +1295,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthIntrospection
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1116,42 +1316,3 @@
 	ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowIntrospection   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", fileDescriptorIntrospection)
-}
-
-var fileDescriptorIntrospection = []byte{
-	// 487 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
-	0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21,
-	0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10,
-	0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb,
-	0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b,
-	0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda,
-	0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22,
-	0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08,
-	0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5,
-	0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c,
-	0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e,
-	0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a,
-	0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89,
-	0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d,
-	0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49,
-	0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93,
-	0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67,
-	0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32,
-	0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b,
-	0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea,
-	0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5,
-	0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87,
-	0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e,
-	0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e,
-	0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c,
-	0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82,
-	0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe,
-	0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78,
-	0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc,
-	0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6,
-	0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
index 1222c1a..3cf2138 100644
--- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
@@ -1,42 +1,22 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/leases/v1/leases.proto
 
-/*
-	Package leases is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/leases/v1/leases.proto
-
-	It has these top-level messages:
-		Lease
-		CreateRequest
-		CreateResponse
-		DeleteRequest
-		ListRequest
-		ListResponse
-*/
 package leases
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import _ "github.com/gogo/protobuf/types"
-
-import time "time"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types "github.com/gogo/protobuf/types"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -52,33 +32,126 @@
 
 // Lease is an object which retains resources while it exists.
 type Lease struct {
-	ID        string            `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	CreatedAt time.Time         `protobuf:"bytes,2,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
-	Labels    map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	ID                   string            `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	CreatedAt            time.Time         `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"`
+	Labels               map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *Lease) Reset()                    { *m = Lease{} }
-func (*Lease) ProtoMessage()               {}
-func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorLeases, []int{0} }
+func (m *Lease) Reset()      { *m = Lease{} }
+func (*Lease) ProtoMessage() {}
+func (*Lease) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fefd70dfe8d93cbf, []int{0}
+}
+func (m *Lease) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Lease.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Lease) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Lease.Merge(m, src)
+}
+func (m *Lease) XXX_Size() int {
+	return m.Size()
+}
+func (m *Lease) XXX_DiscardUnknown() {
+	xxx_messageInfo_Lease.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Lease proto.InternalMessageInfo
 
 type CreateRequest struct {
 	// ID is used to identity the lease, when the id is not set the service
 	// generates a random identifier for the lease.
-	ID     string            `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	ID                   string            `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Labels               map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *CreateRequest) Reset()                    { *m = CreateRequest{} }
-func (*CreateRequest) ProtoMessage()               {}
-func (*CreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorLeases, []int{1} }
+func (m *CreateRequest) Reset()      { *m = CreateRequest{} }
+func (*CreateRequest) ProtoMessage() {}
+func (*CreateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fefd70dfe8d93cbf, []int{1}
+}
+func (m *CreateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateRequest.Merge(m, src)
+}
+func (m *CreateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateRequest proto.InternalMessageInfo
 
 type CreateResponse struct {
-	Lease *Lease `protobuf:"bytes,1,opt,name=lease" json:"lease,omitempty"`
+	Lease                *Lease   `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CreateResponse) Reset()                    { *m = CreateResponse{} }
-func (*CreateResponse) ProtoMessage()               {}
-func (*CreateResponse) Descriptor() ([]byte, []int) { return fileDescriptorLeases, []int{2} }
+func (m *CreateResponse) Reset()      { *m = CreateResponse{} }
+func (*CreateResponse) ProtoMessage() {}
+func (*CreateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fefd70dfe8d93cbf, []int{2}
+}
+func (m *CreateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateResponse.Merge(m, src)
+}
+func (m *CreateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateResponse proto.InternalMessageInfo
 
 type DeleteRequest struct {
 	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -86,38 +159,174 @@
 	// synchronously before returning to the caller
 	//
 	// Default is false
-	Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"`
+	Sync                 bool     `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *DeleteRequest) Reset()                    { *m = DeleteRequest{} }
-func (*DeleteRequest) ProtoMessage()               {}
-func (*DeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorLeases, []int{3} }
+func (m *DeleteRequest) Reset()      { *m = DeleteRequest{} }
+func (*DeleteRequest) ProtoMessage() {}
+func (*DeleteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fefd70dfe8d93cbf, []int{3}
+}
+func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteRequest.Merge(m, src)
+}
+func (m *DeleteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
 
 type ListRequest struct {
-	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+	Filters              []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListRequest) Reset()                    { *m = ListRequest{} }
-func (*ListRequest) ProtoMessage()               {}
-func (*ListRequest) Descriptor() ([]byte, []int) { return fileDescriptorLeases, []int{4} }
+func (m *ListRequest) Reset()      { *m = ListRequest{} }
+func (*ListRequest) ProtoMessage() {}
+func (*ListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fefd70dfe8d93cbf, []int{4}
+}
+func (m *ListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListRequest.Merge(m, src)
+}
+func (m *ListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListRequest proto.InternalMessageInfo
 
 type ListResponse struct {
-	Leases []*Lease `protobuf:"bytes,1,rep,name=leases" json:"leases,omitempty"`
+	Leases               []*Lease `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListResponse) Reset()                    { *m = ListResponse{} }
-func (*ListResponse) ProtoMessage()               {}
-func (*ListResponse) Descriptor() ([]byte, []int) { return fileDescriptorLeases, []int{5} }
+func (m *ListResponse) Reset()      { *m = ListResponse{} }
+func (*ListResponse) ProtoMessage() {}
+func (*ListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fefd70dfe8d93cbf, []int{5}
+}
+func (m *ListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListResponse.Merge(m, src)
+}
+func (m *ListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListResponse proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Lease)(nil), "containerd.services.leases.v1.Lease")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.leases.v1.Lease.LabelsEntry")
 	proto.RegisterType((*CreateRequest)(nil), "containerd.services.leases.v1.CreateRequest")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.leases.v1.CreateRequest.LabelsEntry")
 	proto.RegisterType((*CreateResponse)(nil), "containerd.services.leases.v1.CreateResponse")
 	proto.RegisterType((*DeleteRequest)(nil), "containerd.services.leases.v1.DeleteRequest")
 	proto.RegisterType((*ListRequest)(nil), "containerd.services.leases.v1.ListRequest")
 	proto.RegisterType((*ListResponse)(nil), "containerd.services.leases.v1.ListResponse")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/leases/v1/leases.proto", fileDescriptor_fefd70dfe8d93cbf)
+}
+
+var fileDescriptor_fefd70dfe8d93cbf = []byte{
+	// 515 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xdf, 0x8a, 0xd3, 0x40,
+	0x14, 0xc6, 0x3b, 0xe9, 0x36, 0x6e, 0x4f, 0x5d, 0x91, 0x61, 0x59, 0x4a, 0xc4, 0xb4, 0x04, 0xc1,
+	0xe2, 0x9f, 0x89, 0x5b, 0x6f, 0xd6, 0x5d, 0x11, 0xec, 0x76, 0x41, 0x21, 0x88, 0x04, 0x2f, 0x16,
+	0x6f, 0x96, 0x34, 0x3d, 0x1b, 0x83, 0x69, 0x12, 0x33, 0xd3, 0x42, 0xef, 0x7c, 0x04, 0x1f, 0xc1,
+	0x87, 0xf0, 0x21, 0x7a, 0xe9, 0xa5, 0x57, 0xab, 0x9b, 0x3b, 0xdf, 0x42, 0x32, 0x93, 0xb0, 0x7f,
+	0x44, 0x5b, 0x65, 0xef, 0xce, 0xcc, 0x7c, 0xdf, 0x99, 0xdf, 0xf9, 0xc2, 0x04, 0x86, 0x41, 0x28,
+	0xde, 0x4d, 0x47, 0xcc, 0x4f, 0x26, 0xb6, 0x9f, 0xc4, 0xc2, 0x0b, 0x63, 0xcc, 0xc6, 0xe7, 0x4b,
+	0x2f, 0x0d, 0x6d, 0x8e, 0xd9, 0x2c, 0xf4, 0x91, 0xdb, 0x11, 0x7a, 0x1c, 0xb9, 0x3d, 0xdb, 0x2e,
+	0x2b, 0x96, 0x66, 0x89, 0x48, 0xe8, 0xed, 0x33, 0x3d, 0xab, 0xb4, 0xac, 0x54, 0xcc, 0xb6, 0x8d,
+	0xcd, 0x20, 0x09, 0x12, 0xa9, 0xb4, 0x8b, 0x4a, 0x99, 0x8c, 0x5b, 0x41, 0x92, 0x04, 0x11, 0xda,
+	0x72, 0x35, 0x9a, 0x1e, 0xdb, 0x38, 0x49, 0xc5, 0xbc, 0x3c, 0xec, 0x5c, 0x3e, 0x14, 0xe1, 0x04,
+	0xb9, 0xf0, 0x26, 0xa9, 0x12, 0x58, 0x3f, 0x09, 0x34, 0x9c, 0xe2, 0x06, 0xba, 0x05, 0x5a, 0x38,
+	0x6e, 0x93, 0x2e, 0xe9, 0x35, 0x07, 0x7a, 0x7e, 0xd2, 0xd1, 0x5e, 0x0e, 0x5d, 0x2d, 0x1c, 0xd3,
+	0x7d, 0x00, 0x3f, 0x43, 0x4f, 0xe0, 0xf8, 0xc8, 0x13, 0x6d, 0xad, 0x4b, 0x7a, 0xad, 0xbe, 0xc1,
+	0x54, 0x5f, 0x56, 0xf5, 0x65, 0x6f, 0xaa, 0xbe, 0x83, 0xf5, 0xc5, 0x49, 0xa7, 0xf6, 0xe9, 0x7b,
+	0x87, 0xb8, 0xcd, 0xd2, 0xf7, 0x5c, 0xd0, 0x17, 0xa0, 0x47, 0xde, 0x08, 0x23, 0xde, 0xae, 0x77,
+	0xeb, 0xbd, 0x56, 0xff, 0x11, 0xfb, 0xeb, 0xa8, 0x4c, 0x22, 0x31, 0x47, 0x5a, 0x0e, 0x62, 0x91,
+	0xcd, 0xdd, 0xd2, 0x6f, 0x3c, 0x81, 0xd6, 0xb9, 0x6d, 0x7a, 0x13, 0xea, 0xef, 0x71, 0xae, 0xb0,
+	0xdd, 0xa2, 0xa4, 0x9b, 0xd0, 0x98, 0x79, 0xd1, 0x14, 0x25, 0x6a, 0xd3, 0x55, 0x8b, 0x5d, 0x6d,
+	0x87, 0x58, 0x5f, 0x08, 0x6c, 0xec, 0x4b, 0x24, 0x17, 0x3f, 0x4c, 0x91, 0x8b, 0x3f, 0xce, 0xfc,
+	0xfa, 0x12, 0xee, 0xce, 0x12, 0xdc, 0x0b, 0x5d, 0xaf, 0x1a, 0xdb, 0x81, 0x1b, 0x55, 0x7f, 0x9e,
+	0x26, 0x31, 0x47, 0xba, 0x0b, 0x0d, 0x79, 0xb7, 0xf4, 0xb7, 0xfa, 0x77, 0x56, 0x09, 0xd3, 0x55,
+	0x16, 0x6b, 0x0f, 0x36, 0x86, 0x18, 0xe1, 0xf2, 0x0c, 0x28, 0xac, 0xf1, 0x79, 0xec, 0x4b, 0x9e,
+	0x75, 0x57, 0xd6, 0xd6, 0x5d, 0x68, 0x39, 0x21, 0x17, 0x95, 0xb5, 0x0d, 0xd7, 0x8e, 0xc3, 0x48,
+	0x60, 0xc6, 0xdb, 0xa4, 0x5b, 0xef, 0x35, 0xdd, 0x6a, 0x69, 0x39, 0x70, 0x5d, 0x09, 0x4b, 0xe2,
+	0xa7, 0xa0, 0x2b, 0x1e, 0x29, 0x5c, 0x15, 0xb9, 0xf4, 0xf4, 0x3f, 0x6b, 0xa0, 0xcb, 0x1d, 0x4e,
+	0x11, 0x74, 0x15, 0x06, 0x7d, 0xf0, 0x2f, 0xdf, 0xc4, 0x78, 0xb8, 0xa2, 0xba, 0xe4, 0x7d, 0x05,
+	0xba, 0x4a, 0x69, 0xe9, 0x35, 0x17, 0xc2, 0x34, 0xb6, 0x7e, 0x7b, 0x18, 0x07, 0xc5, 0x6b, 0xa4,
+	0x47, 0xb0, 0x56, 0xe4, 0x41, 0xef, 0x2d, 0x9b, 0xfb, 0x2c, 0x5d, 0xe3, 0xfe, 0x4a, 0x5a, 0x05,
+	0x3c, 0x38, 0x5c, 0x9c, 0x9a, 0xb5, 0x6f, 0xa7, 0x66, 0xed, 0x63, 0x6e, 0x92, 0x45, 0x6e, 0x92,
+	0xaf, 0xb9, 0x49, 0x7e, 0xe4, 0x26, 0x79, 0xfb, 0xec, 0x3f, 0x7f, 0x4d, 0x7b, 0xaa, 0x3a, 0xac,
+	0x8d, 0x74, 0x39, 0xcc, 0xe3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x14, 0x74, 0xdd, 0x12, 0xe5,
+	0x04, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -126,8 +335,9 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Leases service
-
+// LeasesClient is the client API for Leases service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type LeasesClient interface {
 	// Create creates a new lease for managing changes to metadata. A lease
 	// can be used to protect objects from being removed.
@@ -135,7 +345,7 @@
 	// Delete deletes the lease and makes any unreferenced objects created
 	// during the lease eligible for garbage collection if not referenced
 	// or retained by other resources during the lease.
-	Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*types.Empty, error)
 	// List lists all active leases, returning the full list of
 	// leases and optionally including the referenced resources.
 	List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
@@ -151,16 +361,16 @@
 
 func (c *leasesClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) {
 	out := new(CreateResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Create", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Create", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *leasesClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Delete", in, out, c.cc, opts...)
+func (c *leasesClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*types.Empty, error) {
+	out := new(types.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Delete", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -169,15 +379,14 @@
 
 func (c *leasesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) {
 	out := new(ListResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.leases.v1.Leases/List", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/List", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Leases service
-
+// LeasesServer is the server API for Leases service.
 type LeasesServer interface {
 	// Create creates a new lease for managing changes to metadata. A lease
 	// can be used to protect objects from being removed.
@@ -185,7 +394,7 @@
 	// Delete deletes the lease and makes any unreferenced objects created
 	// during the lease eligible for garbage collection if not referenced
 	// or retained by other resources during the lease.
-	Delete(context.Context, *DeleteRequest) (*google_protobuf1.Empty, error)
+	Delete(context.Context, *DeleteRequest) (*types.Empty, error)
 	// List lists all active leases, returning the full list of
 	// leases and optionally including the referenced resources.
 	List(context.Context, *ListRequest) (*ListResponse, error)
@@ -293,8 +502,8 @@
 	}
 	dAtA[i] = 0x12
 	i++
-	i = encodeVarintLeases(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt)))
-	n1, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	i = encodeVarintLeases(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -316,6 +525,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -357,6 +569,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -385,6 +600,9 @@
 		}
 		i += n2
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -419,6 +637,9 @@
 		}
 		i++
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -452,6 +673,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -482,6 +706,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -495,13 +722,16 @@
 	return offset + 1
 }
 func (m *Lease) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovLeases(uint64(l))
 	}
-	l = types.SizeOfStdTime(m.CreatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
 	n += 1 + l + sovLeases(uint64(l))
 	if len(m.Labels) > 0 {
 		for k, v := range m.Labels {
@@ -511,10 +741,16 @@
 			n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -529,20 +765,32 @@
 			n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Lease != nil {
 		l = m.Lease.Size()
 		n += 1 + l + sovLeases(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -552,10 +800,16 @@
 	if m.Sync {
 		n += 2
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Filters) > 0 {
@@ -564,10 +818,16 @@
 			n += 1 + l + sovLeases(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Leases) > 0 {
@@ -576,6 +836,9 @@
 			n += 1 + l + sovLeases(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -600,7 +863,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -608,8 +871,9 @@
 	mapStringForLabels += "}"
 	s := strings.Join([]string{`&Lease{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
-		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -622,7 +886,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -631,6 +895,7 @@
 	s := strings.Join([]string{`&CreateRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -641,6 +906,7 @@
 	}
 	s := strings.Join([]string{`&CreateResponse{`,
 		`Lease:` + strings.Replace(fmt.Sprintf("%v", this.Lease), "Lease", "Lease", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -652,6 +918,7 @@
 	s := strings.Join([]string{`&DeleteRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Sync:` + fmt.Sprintf("%v", this.Sync) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -662,6 +929,7 @@
 	}
 	s := strings.Join([]string{`&ListRequest{`,
 		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -672,6 +940,7 @@
 	}
 	s := strings.Join([]string{`&ListResponse{`,
 		`Leases:` + strings.Replace(fmt.Sprintf("%v", this.Leases), "Lease", "Lease", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -699,7 +968,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -727,7 +996,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -737,6 +1006,9 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -756,7 +1028,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -765,10 +1037,13 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -786,7 +1061,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -795,6 +1070,9 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -815,7 +1093,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -832,7 +1110,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -842,6 +1120,9 @@
 						return ErrInvalidLengthLeases
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthLeases
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -858,7 +1139,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -868,6 +1149,9 @@
 						return ErrInvalidLengthLeases
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthLeases
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -899,9 +1183,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthLeases
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -926,7 +1214,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -954,7 +1242,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -964,6 +1252,9 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -983,7 +1274,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -992,6 +1283,9 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1012,7 +1306,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1029,7 +1323,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1039,6 +1333,9 @@
 						return ErrInvalidLengthLeases
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthLeases
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1055,7 +1352,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1065,6 +1362,9 @@
 						return ErrInvalidLengthLeases
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthLeases
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1096,9 +1396,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthLeases
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1123,7 +1427,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1151,7 +1455,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1160,6 +1464,9 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1179,9 +1486,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthLeases
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1206,7 +1517,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1234,7 +1545,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1244,6 +1555,9 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1263,7 +1577,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1278,9 +1592,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthLeases
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1305,7 +1623,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1333,7 +1651,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1343,6 +1661,9 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1357,9 +1678,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthLeases
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1384,7 +1709,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1412,7 +1737,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1421,6 +1746,9 @@
 				return ErrInvalidLengthLeases
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1438,9 +1766,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthLeases
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLeases
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1504,10 +1836,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthLeases
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthLeases
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1536,6 +1871,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthLeases
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1554,44 +1892,3 @@
 	ErrInvalidLengthLeases = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowLeases   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/leases/v1/leases.proto", fileDescriptorLeases)
-}
-
-var fileDescriptorLeases = []byte{
-	// 515 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xdf, 0x8a, 0xd3, 0x40,
-	0x14, 0xc6, 0x3b, 0xe9, 0x36, 0x6e, 0x4f, 0x5d, 0x91, 0x61, 0x59, 0x4a, 0xc4, 0xb4, 0x04, 0xc1,
-	0xe2, 0x9f, 0x89, 0x5b, 0x6f, 0xd6, 0x5d, 0x11, 0xec, 0x76, 0x41, 0x21, 0x88, 0x04, 0x2f, 0x16,
-	0x6f, 0x96, 0x34, 0x3d, 0x1b, 0x83, 0x69, 0x12, 0x33, 0xd3, 0x42, 0xef, 0x7c, 0x04, 0x1f, 0xc1,
-	0x87, 0xf0, 0x21, 0x7a, 0xe9, 0xa5, 0x57, 0xab, 0x9b, 0x3b, 0xdf, 0x42, 0x32, 0x93, 0xb0, 0x7f,
-	0x44, 0x5b, 0x65, 0xef, 0xce, 0xcc, 0x7c, 0xdf, 0x99, 0xdf, 0xf9, 0xc2, 0x04, 0x86, 0x41, 0x28,
-	0xde, 0x4d, 0x47, 0xcc, 0x4f, 0x26, 0xb6, 0x9f, 0xc4, 0xc2, 0x0b, 0x63, 0xcc, 0xc6, 0xe7, 0x4b,
-	0x2f, 0x0d, 0x6d, 0x8e, 0xd9, 0x2c, 0xf4, 0x91, 0xdb, 0x11, 0x7a, 0x1c, 0xb9, 0x3d, 0xdb, 0x2e,
-	0x2b, 0x96, 0x66, 0x89, 0x48, 0xe8, 0xed, 0x33, 0x3d, 0xab, 0xb4, 0xac, 0x54, 0xcc, 0xb6, 0x8d,
-	0xcd, 0x20, 0x09, 0x12, 0xa9, 0xb4, 0x8b, 0x4a, 0x99, 0x8c, 0x5b, 0x41, 0x92, 0x04, 0x11, 0xda,
-	0x72, 0x35, 0x9a, 0x1e, 0xdb, 0x38, 0x49, 0xc5, 0xbc, 0x3c, 0xec, 0x5c, 0x3e, 0x14, 0xe1, 0x04,
-	0xb9, 0xf0, 0x26, 0xa9, 0x12, 0x58, 0x3f, 0x09, 0x34, 0x9c, 0xe2, 0x06, 0xba, 0x05, 0x5a, 0x38,
-	0x6e, 0x93, 0x2e, 0xe9, 0x35, 0x07, 0x7a, 0x7e, 0xd2, 0xd1, 0x5e, 0x0e, 0x5d, 0x2d, 0x1c, 0xd3,
-	0x7d, 0x00, 0x3f, 0x43, 0x4f, 0xe0, 0xf8, 0xc8, 0x13, 0x6d, 0xad, 0x4b, 0x7a, 0xad, 0xbe, 0xc1,
-	0x54, 0x5f, 0x56, 0xf5, 0x65, 0x6f, 0xaa, 0xbe, 0x83, 0xf5, 0xc5, 0x49, 0xa7, 0xf6, 0xe9, 0x7b,
-	0x87, 0xb8, 0xcd, 0xd2, 0xf7, 0x5c, 0xd0, 0x17, 0xa0, 0x47, 0xde, 0x08, 0x23, 0xde, 0xae, 0x77,
-	0xeb, 0xbd, 0x56, 0xff, 0x11, 0xfb, 0xeb, 0xa8, 0x4c, 0x22, 0x31, 0x47, 0x5a, 0x0e, 0x62, 0x91,
-	0xcd, 0xdd, 0xd2, 0x6f, 0x3c, 0x81, 0xd6, 0xb9, 0x6d, 0x7a, 0x13, 0xea, 0xef, 0x71, 0xae, 0xb0,
-	0xdd, 0xa2, 0xa4, 0x9b, 0xd0, 0x98, 0x79, 0xd1, 0x14, 0x25, 0x6a, 0xd3, 0x55, 0x8b, 0x5d, 0x6d,
-	0x87, 0x58, 0x5f, 0x08, 0x6c, 0xec, 0x4b, 0x24, 0x17, 0x3f, 0x4c, 0x91, 0x8b, 0x3f, 0xce, 0xfc,
-	0xfa, 0x12, 0xee, 0xce, 0x12, 0xdc, 0x0b, 0x5d, 0xaf, 0x1a, 0xdb, 0x81, 0x1b, 0x55, 0x7f, 0x9e,
-	0x26, 0x31, 0x47, 0xba, 0x0b, 0x0d, 0x79, 0xb7, 0xf4, 0xb7, 0xfa, 0x77, 0x56, 0x09, 0xd3, 0x55,
-	0x16, 0x6b, 0x0f, 0x36, 0x86, 0x18, 0xe1, 0xf2, 0x0c, 0x28, 0xac, 0xf1, 0x79, 0xec, 0x4b, 0x9e,
-	0x75, 0x57, 0xd6, 0xd6, 0x5d, 0x68, 0x39, 0x21, 0x17, 0x95, 0xb5, 0x0d, 0xd7, 0x8e, 0xc3, 0x48,
-	0x60, 0xc6, 0xdb, 0xa4, 0x5b, 0xef, 0x35, 0xdd, 0x6a, 0x69, 0x39, 0x70, 0x5d, 0x09, 0x4b, 0xe2,
-	0xa7, 0xa0, 0x2b, 0x1e, 0x29, 0x5c, 0x15, 0xb9, 0xf4, 0xf4, 0x3f, 0x6b, 0xa0, 0xcb, 0x1d, 0x4e,
-	0x11, 0x74, 0x15, 0x06, 0x7d, 0xf0, 0x2f, 0xdf, 0xc4, 0x78, 0xb8, 0xa2, 0xba, 0xe4, 0x7d, 0x05,
-	0xba, 0x4a, 0x69, 0xe9, 0x35, 0x17, 0xc2, 0x34, 0xb6, 0x7e, 0x7b, 0x18, 0x07, 0xc5, 0x6b, 0xa4,
-	0x47, 0xb0, 0x56, 0xe4, 0x41, 0xef, 0x2d, 0x9b, 0xfb, 0x2c, 0x5d, 0xe3, 0xfe, 0x4a, 0x5a, 0x05,
-	0x3c, 0x38, 0x5c, 0x9c, 0x9a, 0xb5, 0x6f, 0xa7, 0x66, 0xed, 0x63, 0x6e, 0x92, 0x45, 0x6e, 0x92,
-	0xaf, 0xb9, 0x49, 0x7e, 0xe4, 0x26, 0x79, 0xfb, 0xec, 0x3f, 0x7f, 0x4d, 0x7b, 0xaa, 0x3a, 0xac,
-	0x8d, 0x74, 0x39, 0xcc, 0xe3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x14, 0x74, 0xdd, 0x12, 0xe5,
-	0x04, 0x00, 0x00,
-}
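
Aside: the regenerated leases.pb.go above (and the namespace.pb.go diff that follows) rewrites every varint read as `wire |= uint64(b&0x7F) << shift` and adds `postIndex < 0` / `(iNdEx + skippy) < 0` guards so that overflowed offsets are rejected with ErrInvalidLengthLeases instead of indexing out of bounds. Below is a minimal standalone sketch of that decode-and-bounds-check pattern; the helper names (readUvarint, errIntOverflow, errInvalidLength) are illustrative only and are not part of the vendored code.

package main

import (
	"errors"
	"fmt"
)

var (
	errIntOverflow   = errors.New("proto: integer overflow")
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
)

// readUvarint decodes a base-128 varint the same way the generated
// Unmarshal methods in this diff do: seven payload bits per byte,
// little-endian, with an explicit overflow guard on the shift.
func readUvarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if index >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, index, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02.
	v, next, err := readUvarint([]byte{0xAC, 0x02}, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(v, next) // 300 2

	// A length-delimited field is then bounds-checked the same way the
	// regenerated code does: a negative length or a negative (overflowed)
	// end offset is rejected before slicing. For this valid input the
	// branch is not taken.
	length := int(v)
	postIndex := next + length
	if length < 0 || postIndex < 0 {
		fmt.Println(errInvalidLength)
		return
	}
	fmt.Println("field ends at offset", postIndex)
}

The extra `postIndex < 0` check matters because postIndex is computed as an int sum; a hostile length close to the int maximum can wrap the addition negative even after the length itself passed the non-negative check, which is exactly the case the regenerated guards close off.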
diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
index f471f1c..3c6cdd7 100644
--- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
@@ -1,42 +1,20 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
 
-/*
-	Package namespaces is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
-
-	It has these top-level messages:
-		Namespace
-		GetNamespaceRequest
-		GetNamespaceResponse
-		ListNamespacesRequest
-		ListNamespacesResponse
-		CreateNamespaceRequest
-		CreateNamespaceResponse
-		UpdateNamespaceRequest
-		UpdateNamespaceResponse
-		DeleteNamespaceRequest
-*/
 package namespaces
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import google_protobuf2 "github.com/gogo/protobuf/types"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	types "github.com/gogo/protobuf/types"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -57,60 +35,277 @@
 	//
 	// Note that to add a new value to this field, read the existing set and
 	// include the entire result in the update call.
-	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels               map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *Namespace) Reset()                    { *m = Namespace{} }
-func (*Namespace) ProtoMessage()               {}
-func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{0} }
+func (m *Namespace) Reset()      { *m = Namespace{} }
+func (*Namespace) ProtoMessage() {}
+func (*Namespace) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{0}
+}
+func (m *Namespace) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Namespace.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Namespace) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Namespace.Merge(m, src)
+}
+func (m *Namespace) XXX_Size() int {
+	return m.Size()
+}
+func (m *Namespace) XXX_DiscardUnknown() {
+	xxx_messageInfo_Namespace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Namespace proto.InternalMessageInfo
 
 type GetNamespaceRequest struct {
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *GetNamespaceRequest) Reset()                    { *m = GetNamespaceRequest{} }
-func (*GetNamespaceRequest) ProtoMessage()               {}
-func (*GetNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{1} }
+func (m *GetNamespaceRequest) Reset()      { *m = GetNamespaceRequest{} }
+func (*GetNamespaceRequest) ProtoMessage() {}
+func (*GetNamespaceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{1}
+}
+func (m *GetNamespaceRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_GetNamespaceRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *GetNamespaceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetNamespaceRequest.Merge(m, src)
+}
+func (m *GetNamespaceRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetNamespaceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetNamespaceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNamespaceRequest proto.InternalMessageInfo
 
 type GetNamespaceResponse struct {
-	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+	Namespace            Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *GetNamespaceResponse) Reset()                    { *m = GetNamespaceResponse{} }
-func (*GetNamespaceResponse) ProtoMessage()               {}
-func (*GetNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }
+func (m *GetNamespaceResponse) Reset()      { *m = GetNamespaceResponse{} }
+func (*GetNamespaceResponse) ProtoMessage() {}
+func (*GetNamespaceResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{2}
+}
+func (m *GetNamespaceResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_GetNamespaceResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *GetNamespaceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetNamespaceResponse.Merge(m, src)
+}
+func (m *GetNamespaceResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetNamespaceResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetNamespaceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNamespaceResponse proto.InternalMessageInfo
 
 type ListNamespacesRequest struct {
-	Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+	Filter               string   `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListNamespacesRequest) Reset()                    { *m = ListNamespacesRequest{} }
-func (*ListNamespacesRequest) ProtoMessage()               {}
-func (*ListNamespacesRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{3} }
+func (m *ListNamespacesRequest) Reset()      { *m = ListNamespacesRequest{} }
+func (*ListNamespacesRequest) ProtoMessage() {}
+func (*ListNamespacesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{3}
+}
+func (m *ListNamespacesRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListNamespacesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListNamespacesRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListNamespacesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListNamespacesRequest.Merge(m, src)
+}
+func (m *ListNamespacesRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListNamespacesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListNamespacesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNamespacesRequest proto.InternalMessageInfo
 
 type ListNamespacesResponse struct {
-	Namespaces []Namespace `protobuf:"bytes,1,rep,name=namespaces" json:"namespaces"`
+	Namespaces           []Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
-func (m *ListNamespacesResponse) Reset()                    { *m = ListNamespacesResponse{} }
-func (*ListNamespacesResponse) ProtoMessage()               {}
-func (*ListNamespacesResponse) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{4} }
+func (m *ListNamespacesResponse) Reset()      { *m = ListNamespacesResponse{} }
+func (*ListNamespacesResponse) ProtoMessage() {}
+func (*ListNamespacesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{4}
+}
+func (m *ListNamespacesResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListNamespacesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListNamespacesResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListNamespacesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListNamespacesResponse.Merge(m, src)
+}
+func (m *ListNamespacesResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListNamespacesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListNamespacesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNamespacesResponse proto.InternalMessageInfo
 
 type CreateNamespaceRequest struct {
-	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+	Namespace            Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *CreateNamespaceRequest) Reset()                    { *m = CreateNamespaceRequest{} }
-func (*CreateNamespaceRequest) ProtoMessage()               {}
-func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{5} }
+func (m *CreateNamespaceRequest) Reset()      { *m = CreateNamespaceRequest{} }
+func (*CreateNamespaceRequest) ProtoMessage() {}
+func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{5}
+}
+func (m *CreateNamespaceRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateNamespaceRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateNamespaceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateNamespaceRequest.Merge(m, src)
+}
+func (m *CreateNamespaceRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateNamespaceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateNamespaceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateNamespaceRequest proto.InternalMessageInfo
 
 type CreateNamespaceResponse struct {
-	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+	Namespace            Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *CreateNamespaceResponse) Reset()                    { *m = CreateNamespaceResponse{} }
-func (*CreateNamespaceResponse) ProtoMessage()               {}
-func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{6} }
+func (m *CreateNamespaceResponse) Reset()      { *m = CreateNamespaceResponse{} }
+func (*CreateNamespaceResponse) ProtoMessage() {}
+func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{6}
+}
+func (m *CreateNamespaceResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateNamespaceResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateNamespaceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateNamespaceResponse.Merge(m, src)
+}
+func (m *CreateNamespaceResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateNamespaceResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateNamespaceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateNamespaceResponse proto.InternalMessageInfo
 
 // UpdateNamespaceRequest updates the metadata for a namespace.
 //
@@ -121,38 +316,132 @@
 	// Namespace provides the target value, as declared by the mask, for the update.
 	//
 	// The namespace field must be set.
-	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"`
 	// UpdateMask specifies which fields to perform the update on. If empty,
 	// the operation applies to all fields.
 	//
 	// For the most part, this applies only to selectively updating labels on
 	// the namespace. While field masks are typically limited to ascii alphas
 	// and digits, we just take everything after the "labels." as the map key.
-	UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+	UpdateMask           *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
 }
 
-func (m *UpdateNamespaceRequest) Reset()                    { *m = UpdateNamespaceRequest{} }
-func (*UpdateNamespaceRequest) ProtoMessage()               {}
-func (*UpdateNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{7} }
+func (m *UpdateNamespaceRequest) Reset()      { *m = UpdateNamespaceRequest{} }
+func (*UpdateNamespaceRequest) ProtoMessage() {}
+func (*UpdateNamespaceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{7}
+}
+func (m *UpdateNamespaceRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateNamespaceRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateNamespaceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateNamespaceRequest.Merge(m, src)
+}
+func (m *UpdateNamespaceRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateNamespaceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateNamespaceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateNamespaceRequest proto.InternalMessageInfo
 
 type UpdateNamespaceResponse struct {
-	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+	Namespace            Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *UpdateNamespaceResponse) Reset()                    { *m = UpdateNamespaceResponse{} }
-func (*UpdateNamespaceResponse) ProtoMessage()               {}
-func (*UpdateNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{8} }
+func (m *UpdateNamespaceResponse) Reset()      { *m = UpdateNamespaceResponse{} }
+func (*UpdateNamespaceResponse) ProtoMessage() {}
+func (*UpdateNamespaceResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{8}
+}
+func (m *UpdateNamespaceResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateNamespaceResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateNamespaceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateNamespaceResponse.Merge(m, src)
+}
+func (m *UpdateNamespaceResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateNamespaceResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateNamespaceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateNamespaceResponse proto.InternalMessageInfo
 
 type DeleteNamespaceRequest struct {
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *DeleteNamespaceRequest) Reset()                    { *m = DeleteNamespaceRequest{} }
-func (*DeleteNamespaceRequest) ProtoMessage()               {}
-func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{9} }
+func (m *DeleteNamespaceRequest) Reset()      { *m = DeleteNamespaceRequest{} }
+func (*DeleteNamespaceRequest) ProtoMessage() {}
+func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8c41761eaeea4fd3, []int{9}
+}
+func (m *DeleteNamespaceRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteNamespaceRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteNamespaceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteNamespaceRequest.Merge(m, src)
+}
+func (m *DeleteNamespaceRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteNamespaceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteNamespaceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteNamespaceRequest proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Namespace)(nil), "containerd.services.namespaces.v1.Namespace")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.namespaces.v1.Namespace.LabelsEntry")
 	proto.RegisterType((*GetNamespaceRequest)(nil), "containerd.services.namespaces.v1.GetNamespaceRequest")
 	proto.RegisterType((*GetNamespaceResponse)(nil), "containerd.services.namespaces.v1.GetNamespaceResponse")
 	proto.RegisterType((*ListNamespacesRequest)(nil), "containerd.services.namespaces.v1.ListNamespacesRequest")
@@ -164,6 +453,49 @@
 	proto.RegisterType((*DeleteNamespaceRequest)(nil), "containerd.services.namespaces.v1.DeleteNamespaceRequest")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", fileDescriptor_8c41761eaeea4fd3)
+}
+
+var fileDescriptor_8c41761eaeea4fd3 = []byte{
+	// 551 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c,
+	0x14, 0xcd, 0x24, 0xf9, 0x2c, 0xe5, 0x7a, 0xf3, 0x69, 0x08, 0x26, 0x32, 0x92, 0x09, 0x5e, 0x15,
+	0xa9, 0x1a, 0xab, 0x41, 0x82, 0xfe, 0xec, 0x0a, 0x6d, 0x17, 0x14, 0x84, 0x2c, 0x21, 0x21, 0x58,
+	0x80, 0x93, 0x4c, 0x5c, 0x13, 0xc7, 0x36, 0x9e, 0xb1, 0xa5, 0x88, 0x05, 0xbc, 0x0d, 0x1b, 0x1e,
+	0x24, 0x4b, 0x96, 0xac, 0x50, 0x9b, 0x27, 0x41, 0x33, 0x76, 0xe2, 0xd0, 0x18, 0xe1, 0x06, 0xca,
+	0xee, 0x5e, 0x7b, 0xce, 0x3d, 0x67, 0xae, 0xce, 0xb1, 0xe1, 0x89, 0xeb, 0xf1, 0xb3, 0xa4, 0x4f,
+	0x06, 0xe1, 0xc4, 0x1a, 0x84, 0x01, 0x77, 0xbc, 0x80, 0xc6, 0xc3, 0xd5, 0xd2, 0x89, 0x3c, 0x8b,
+	0xd1, 0x38, 0xf5, 0x06, 0x94, 0x59, 0x81, 0x33, 0xa1, 0x2c, 0x72, 0x44, 0x99, 0xee, 0x14, 0x1d,
+	0x89, 0xe2, 0x90, 0x87, 0xf8, 0x6e, 0x01, 0x23, 0x0b, 0x08, 0x29, 0x20, 0x24, 0xdd, 0xd1, 0xdb,
+	0x6e, 0xe8, 0x86, 0xf2, 0xb4, 0x25, 0xaa, 0x0c, 0xa8, 0xdf, 0x76, 0xc3, 0xd0, 0xf5, 0xa9, 0x25,
+	0xbb, 0x7e, 0x32, 0xb2, 0xe8, 0x24, 0xe2, 0xd3, 0xfc, 0x65, 0xf7, 0xf2, 0xcb, 0x91, 0x47, 0xfd,
+	0xe1, 0x9b, 0x89, 0xc3, 0xc6, 0xd9, 0x09, 0xf3, 0x0b, 0x82, 0xd6, 0xb3, 0x05, 0x0d, 0xc6, 0xd0,
+	0x14, 0x9c, 0x1d, 0xd4, 0x45, 0x5b, 0x2d, 0x5b, 0xd6, 0xf8, 0x39, 0x28, 0xbe, 0xd3, 0xa7, 0x3e,
+	0xeb, 0xd4, 0xbb, 0x8d, 0x2d, 0xb5, 0xb7, 0x4b, 0x7e, 0x2b, 0x95, 0x2c, 0x27, 0x92, 0x53, 0x09,
+	0x3d, 0x0a, 0x78, 0x3c, 0xb5, 0xf3, 0x39, 0xfa, 0x1e, 0xa8, 0x2b, 0x8f, 0xf1, 0xff, 0xd0, 0x18,
+	0xd3, 0x69, 0xce, 0x29, 0x4a, 0xdc, 0x86, 0xff, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65,
+	0xcd, 0x7e, 0x7d, 0x17, 0x99, 0xf7, 0xe0, 0xc6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0x7d, 0x9f, 0x50,
+	0xc6, 0xcb, 0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf9, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96,
+	0x62, 0x25, 0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0xf7, 0x3b, 0x35, 0xbb, 0x18,
+	0x62, 0x5a, 0x70, 0xf3, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7,
+	0x71, 0x2e, 0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90,
+	0x5c, 0xf8, 0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x40, 0x7b, 0x14, 0x53, 0x87, 0xd3, 0xb5, 0xb5,
+	0xfd, 0xfd, 0x55, 0x8c, 0xe1, 0xd6, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x33, 0x02, 0xed, 0x45, 0x34,
+	0xfc, 0x27, 0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24,
+	0x0b, 0x18, 0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x75, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8,
+	0xc5, 0x5a, 0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4c, 0x7d, 0x5a, 0xb2, 0x95, 0x92,
+	0x98, 0xf4, 0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0x83, 0x0a, 0x12,
+	0x4a, 0x82, 0xa8, 0x3f, 0xbc, 0x32, 0x2e, 0x5f, 0xc3, 0x07, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f,
+	0x97, 0xd2, 0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x8f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86,
+	0x94, 0x87, 0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b,
+	0x09, 0xf8, 0x95, 0x0b, 0x5f, 0x83, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a,
+	0x1a, 0x8e, 0xc4, 0xbf, 0xe8, 0xf0, 0xed, 0xec, 0xc2, 0xa8, 0x7d, 0xbb, 0x30, 0x6a, 0x9f, 0xe6,
+	0x06, 0x9a, 0xcd, 0x0d, 0xf4, 0x75, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1d, 0xff, 0xc1, 0x2f,
+	0xf4, 0xa0, 0xe8, 0x5e, 0xd6, 0xfa, 0x8a, 0xe4, 0xbc, 0xff, 0x23, 0x00, 0x00, 0xff, 0xff, 0x4f,
+	0x4a, 0x87, 0xf3, 0x95, 0x07, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -172,14 +504,15 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Namespaces service
-
+// NamespacesClient is the client API for Namespaces service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type NamespacesClient interface {
 	Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error)
 	List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error)
 	Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error)
 	Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error)
-	Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*types.Empty, error)
 }
 
 type namespacesClient struct {
@@ -192,7 +525,7 @@
 
 func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) {
 	out := new(GetNamespaceResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -201,7 +534,7 @@
 
 func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) {
 	out := new(ListNamespacesResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -210,7 +543,7 @@
 
 func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) {
 	out := new(CreateNamespaceResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -219,30 +552,29 @@
 
 func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) {
 	out := new(UpdateNamespaceResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, c.cc, opts...)
+func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*types.Empty, error) {
+	out := new(types.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Namespaces service
-
+// NamespacesServer is the server API for Namespaces service.
 type NamespacesServer interface {
 	Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error)
 	List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error)
 	Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error)
 	Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error)
-	Delete(context.Context, *DeleteNamespaceRequest) (*google_protobuf1.Empty, error)
+	Delete(context.Context, *DeleteNamespaceRequest) (*types.Empty, error)
 }
 
 func RegisterNamespacesServer(s *grpc.Server, srv NamespacesServer) {
@@ -406,6 +738,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -430,6 +765,9 @@
 		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
 		i += copy(dAtA[i:], m.Name)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -456,6 +794,9 @@
 		return 0, err
 	}
 	i += n1
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -480,6 +821,9 @@
 		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Filter)))
 		i += copy(dAtA[i:], m.Filter)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -510,6 +854,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -536,6 +883,9 @@
 		return 0, err
 	}
 	i += n2
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -562,6 +912,9 @@
 		return 0, err
 	}
 	i += n3
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -598,6 +951,9 @@
 		}
 		i += n5
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -624,6 +980,9 @@
 		return 0, err
 	}
 	i += n6
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -648,6 +1007,9 @@
 		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
 		i += copy(dAtA[i:], m.Name)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -661,6 +1023,9 @@
 	return offset + 1
 }
 func (m *Namespace) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -675,38 +1040,62 @@
 			n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *GetNamespaceRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
 	if l > 0 {
 		n += 1 + l + sovNamespace(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *GetNamespaceResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Namespace.Size()
 	n += 1 + l + sovNamespace(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListNamespacesRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Filter)
 	if l > 0 {
 		n += 1 + l + sovNamespace(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListNamespacesResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Namespaces) > 0 {
@@ -715,26 +1104,44 @@
 			n += 1 + l + sovNamespace(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateNamespaceRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Namespace.Size()
 	n += 1 + l + sovNamespace(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateNamespaceResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Namespace.Size()
 	n += 1 + l + sovNamespace(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateNamespaceRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Namespace.Size()
@@ -743,24 +1150,39 @@
 		l = m.UpdateMask.Size()
 		n += 1 + l + sovNamespace(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateNamespaceResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Namespace.Size()
 	n += 1 + l + sovNamespace(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteNamespaceRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
 	if l > 0 {
 		n += 1 + l + sovNamespace(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -785,7 +1207,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -794,6 +1216,7 @@
 	s := strings.Join([]string{`&Namespace{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -804,6 +1227,7 @@
 	}
 	s := strings.Join([]string{`&GetNamespaceRequest{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -814,6 +1238,7 @@
 	}
 	s := strings.Join([]string{`&GetNamespaceResponse{`,
 		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -824,6 +1249,7 @@
 	}
 	s := strings.Join([]string{`&ListNamespacesRequest{`,
 		`Filter:` + fmt.Sprintf("%v", this.Filter) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -834,6 +1260,7 @@
 	}
 	s := strings.Join([]string{`&ListNamespacesResponse{`,
 		`Namespaces:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Namespaces), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -844,6 +1271,7 @@
 	}
 	s := strings.Join([]string{`&CreateNamespaceRequest{`,
 		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -854,6 +1282,7 @@
 	}
 	s := strings.Join([]string{`&CreateNamespaceResponse{`,
 		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -864,7 +1293,8 @@
 	}
 	s := strings.Join([]string{`&UpdateNamespaceRequest{`,
 		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
-		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf2.FieldMask", 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -875,6 +1305,7 @@
 	}
 	s := strings.Join([]string{`&UpdateNamespaceResponse{`,
 		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -885,6 +1316,7 @@
 	}
 	s := strings.Join([]string{`&DeleteNamespaceRequest{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -912,7 +1344,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -940,7 +1372,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -950,6 +1382,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -969,7 +1404,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -978,6 +1413,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -998,7 +1436,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1015,7 +1453,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1025,6 +1463,9 @@
 						return ErrInvalidLengthNamespace
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthNamespace
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1041,7 +1482,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1051,6 +1492,9 @@
 						return ErrInvalidLengthNamespace
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthNamespace
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1082,9 +1526,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1109,7 +1557,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1137,7 +1585,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1147,6 +1595,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1161,9 +1612,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1188,7 +1643,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1216,7 +1671,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1225,6 +1680,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1241,9 +1699,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1268,7 +1730,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1296,7 +1758,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1306,6 +1768,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1320,9 +1785,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1347,7 +1816,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1375,7 +1844,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1384,6 +1853,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1401,9 +1873,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1428,7 +1904,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1456,7 +1932,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1465,6 +1941,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1481,9 +1960,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1508,7 +1991,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1536,7 +2019,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1545,6 +2028,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1561,9 +2047,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1588,7 +2078,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1616,7 +2106,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1625,6 +2115,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1646,7 +2139,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1655,11 +2148,14 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.UpdateMask == nil {
-				m.UpdateMask = &google_protobuf2.FieldMask{}
+				m.UpdateMask = &types.FieldMask{}
 			}
 			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -1674,9 +2170,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1701,7 +2201,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1729,7 +2229,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1738,6 +2238,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1754,9 +2257,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1781,7 +2288,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1809,7 +2316,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1819,6 +2326,9 @@
 				return ErrInvalidLengthNamespace
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1833,9 +2343,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthNamespace
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthNamespace
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1899,10 +2413,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthNamespace
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthNamespace
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1931,6 +2448,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthNamespace
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1949,46 +2469,3 @@
 	ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowNamespace   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", fileDescriptorNamespace)
-}
-
-var fileDescriptorNamespace = []byte{
-	// 551 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c,
-	0x14, 0xcd, 0x24, 0xf9, 0x2c, 0xe5, 0x7a, 0xf3, 0x69, 0x08, 0x26, 0x32, 0x92, 0x09, 0x5e, 0x15,
-	0xa9, 0x1a, 0xab, 0x41, 0x82, 0xfe, 0xec, 0x0a, 0x6d, 0x17, 0x14, 0x84, 0x2c, 0x21, 0x21, 0x58,
-	0x80, 0x93, 0x4c, 0x5c, 0x13, 0xc7, 0x36, 0x9e, 0xb1, 0xa5, 0x88, 0x05, 0xbc, 0x0d, 0x1b, 0x1e,
-	0x24, 0x4b, 0x96, 0xac, 0x50, 0x9b, 0x27, 0x41, 0x33, 0x76, 0xe2, 0xd0, 0x18, 0xe1, 0x06, 0xca,
-	0xee, 0x5e, 0x7b, 0xce, 0x3d, 0x67, 0xae, 0xce, 0xb1, 0xe1, 0x89, 0xeb, 0xf1, 0xb3, 0xa4, 0x4f,
-	0x06, 0xe1, 0xc4, 0x1a, 0x84, 0x01, 0x77, 0xbc, 0x80, 0xc6, 0xc3, 0xd5, 0xd2, 0x89, 0x3c, 0x8b,
-	0xd1, 0x38, 0xf5, 0x06, 0x94, 0x59, 0x81, 0x33, 0xa1, 0x2c, 0x72, 0x44, 0x99, 0xee, 0x14, 0x1d,
-	0x89, 0xe2, 0x90, 0x87, 0xf8, 0x6e, 0x01, 0x23, 0x0b, 0x08, 0x29, 0x20, 0x24, 0xdd, 0xd1, 0xdb,
-	0x6e, 0xe8, 0x86, 0xf2, 0xb4, 0x25, 0xaa, 0x0c, 0xa8, 0xdf, 0x76, 0xc3, 0xd0, 0xf5, 0xa9, 0x25,
-	0xbb, 0x7e, 0x32, 0xb2, 0xe8, 0x24, 0xe2, 0xd3, 0xfc, 0x65, 0xf7, 0xf2, 0xcb, 0x91, 0x47, 0xfd,
-	0xe1, 0x9b, 0x89, 0xc3, 0xc6, 0xd9, 0x09, 0xf3, 0x0b, 0x82, 0xd6, 0xb3, 0x05, 0x0d, 0xc6, 0xd0,
-	0x14, 0x9c, 0x1d, 0xd4, 0x45, 0x5b, 0x2d, 0x5b, 0xd6, 0xf8, 0x39, 0x28, 0xbe, 0xd3, 0xa7, 0x3e,
-	0xeb, 0xd4, 0xbb, 0x8d, 0x2d, 0xb5, 0xb7, 0x4b, 0x7e, 0x2b, 0x95, 0x2c, 0x27, 0x92, 0x53, 0x09,
-	0x3d, 0x0a, 0x78, 0x3c, 0xb5, 0xf3, 0x39, 0xfa, 0x1e, 0xa8, 0x2b, 0x8f, 0xf1, 0xff, 0xd0, 0x18,
-	0xd3, 0x69, 0xce, 0x29, 0x4a, 0xdc, 0x86, 0xff, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65,
-	0xcd, 0x7e, 0x7d, 0x17, 0x99, 0xf7, 0xe0, 0xc6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0x7d, 0x9f, 0x50,
-	0xc6, 0xcb, 0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf9, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96,
-	0x62, 0x25, 0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0xf7, 0x3b, 0x35, 0xbb, 0x18,
-	0x62, 0x5a, 0x70, 0xf3, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7,
-	0x71, 0x2e, 0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90,
-	0x5c, 0xf8, 0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x40, 0x7b, 0x14, 0x53, 0x87, 0xd3, 0xb5, 0xb5,
-	0xfd, 0xfd, 0x55, 0x8c, 0xe1, 0xd6, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x33, 0x02, 0xed, 0x45, 0x34,
-	0xfc, 0x27, 0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24,
-	0x0b, 0x18, 0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x75, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8,
-	0xc5, 0x5a, 0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4c, 0x7d, 0x5a, 0xb2, 0x95, 0x92,
-	0x98, 0xf4, 0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0x83, 0x0a, 0x12,
-	0x4a, 0x82, 0xa8, 0x3f, 0xbc, 0x32, 0x2e, 0x5f, 0xc3, 0x07, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f,
-	0x97, 0xd2, 0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x8f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86,
-	0x94, 0x87, 0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b,
-	0x09, 0xf8, 0x95, 0x0b, 0x5f, 0x83, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a,
-	0x1a, 0x8e, 0xc4, 0xbf, 0xe8, 0xf0, 0xed, 0xec, 0xc2, 0xa8, 0x7d, 0xbb, 0x30, 0x6a, 0x9f, 0xe6,
-	0x06, 0x9a, 0xcd, 0x0d, 0xf4, 0x75, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1d, 0xff, 0xc1, 0x2f,
-	0xf4, 0xa0, 0xe8, 0x5e, 0xd6, 0xfa, 0x8a, 0xe4, 0xbc, 0xff, 0x23, 0x00, 0x00, 0xff, 0xff, 0x4f,
-	0x4a, 0x87, 0xf3, 0x95, 0x07, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
index 1693af0..1aa7585 100644
--- a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
@@ -1,55 +1,23 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
 
-/*
-	Package snapshots is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
-
-	It has these top-level messages:
-		PrepareSnapshotRequest
-		PrepareSnapshotResponse
-		ViewSnapshotRequest
-		ViewSnapshotResponse
-		MountsRequest
-		MountsResponse
-		RemoveSnapshotRequest
-		CommitSnapshotRequest
-		StatSnapshotRequest
-		Info
-		StatSnapshotResponse
-		UpdateSnapshotRequest
-		UpdateSnapshotResponse
-		ListSnapshotsRequest
-		ListSnapshotsResponse
-		UsageRequest
-		UsageResponse
-*/
 package snapshots
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import google_protobuf2 "github.com/gogo/protobuf/types"
-import _ "github.com/gogo/protobuf/types"
-import containerd_types "github.com/containerd/containerd/api/types"
-
-import time "time"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-import sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	types "github.com/containerd/containerd/api/types"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types1 "github.com/gogo/protobuf/types"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -78,6 +46,7 @@
 	2: "ACTIVE",
 	3: "COMMITTED",
 }
+
 var Kind_value = map[string]int32{
 	"UNKNOWN":   0,
 	"VIEW":      1,
@@ -88,7 +57,10 @@
 func (x Kind) String() string {
 	return proto.EnumName(Kind_name, int32(x))
 }
-func (Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{0} }
+
+func (Kind) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{0}
+}
 
 type PrepareSnapshotRequest struct {
 	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
@@ -97,20 +69,82 @@
 	// Labels are arbitrary data on snapshots.
 	//
 	// The combined size of a key/value pair cannot exceed 4096 bytes.
-	Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels               map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *PrepareSnapshotRequest) Reset()                    { *m = PrepareSnapshotRequest{} }
-func (*PrepareSnapshotRequest) ProtoMessage()               {}
-func (*PrepareSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{0} }
+func (m *PrepareSnapshotRequest) Reset()      { *m = PrepareSnapshotRequest{} }
+func (*PrepareSnapshotRequest) ProtoMessage() {}
+func (*PrepareSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{0}
+}
+func (m *PrepareSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PrepareSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PrepareSnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PrepareSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PrepareSnapshotRequest.Merge(m, src)
+}
+func (m *PrepareSnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PrepareSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PrepareSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PrepareSnapshotRequest proto.InternalMessageInfo
 
 type PrepareSnapshotResponse struct {
-	Mounts []*containerd_types.Mount `protobuf:"bytes,1,rep,name=mounts" json:"mounts,omitempty"`
+	Mounts               []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
 }
 
-func (m *PrepareSnapshotResponse) Reset()                    { *m = PrepareSnapshotResponse{} }
-func (*PrepareSnapshotResponse) ProtoMessage()               {}
-func (*PrepareSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{1} }
+func (m *PrepareSnapshotResponse) Reset()      { *m = PrepareSnapshotResponse{} }
+func (*PrepareSnapshotResponse) ProtoMessage() {}
+func (*PrepareSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{1}
+}
+func (m *PrepareSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PrepareSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PrepareSnapshotResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PrepareSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PrepareSnapshotResponse.Merge(m, src)
+}
+func (m *PrepareSnapshotResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *PrepareSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_PrepareSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PrepareSnapshotResponse proto.InternalMessageInfo
 
 type ViewSnapshotRequest struct {
 	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
@@ -119,46 +153,201 @@
 	// Labels are arbitrary data on snapshots.
 	//
 	// The combined size of a key/value pair cannot exceed 4096 bytes.
-	Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels               map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *ViewSnapshotRequest) Reset()                    { *m = ViewSnapshotRequest{} }
-func (*ViewSnapshotRequest) ProtoMessage()               {}
-func (*ViewSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{2} }
+func (m *ViewSnapshotRequest) Reset()      { *m = ViewSnapshotRequest{} }
+func (*ViewSnapshotRequest) ProtoMessage() {}
+func (*ViewSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{2}
+}
+func (m *ViewSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ViewSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ViewSnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ViewSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ViewSnapshotRequest.Merge(m, src)
+}
+func (m *ViewSnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ViewSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ViewSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ViewSnapshotRequest proto.InternalMessageInfo
 
 type ViewSnapshotResponse struct {
-	Mounts []*containerd_types.Mount `protobuf:"bytes,1,rep,name=mounts" json:"mounts,omitempty"`
+	Mounts               []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
 }
 
-func (m *ViewSnapshotResponse) Reset()                    { *m = ViewSnapshotResponse{} }
-func (*ViewSnapshotResponse) ProtoMessage()               {}
-func (*ViewSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{3} }
+func (m *ViewSnapshotResponse) Reset()      { *m = ViewSnapshotResponse{} }
+func (*ViewSnapshotResponse) ProtoMessage() {}
+func (*ViewSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{3}
+}
+func (m *ViewSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ViewSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ViewSnapshotResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ViewSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ViewSnapshotResponse.Merge(m, src)
+}
+func (m *ViewSnapshotResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ViewSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ViewSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ViewSnapshotResponse proto.InternalMessageInfo
 
 type MountsRequest struct {
-	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
-	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Snapshotter          string   `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key                  string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *MountsRequest) Reset()                    { *m = MountsRequest{} }
-func (*MountsRequest) ProtoMessage()               {}
-func (*MountsRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{4} }
+func (m *MountsRequest) Reset()      { *m = MountsRequest{} }
+func (*MountsRequest) ProtoMessage() {}
+func (*MountsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{4}
+}
+func (m *MountsRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MountsRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MountsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MountsRequest.Merge(m, src)
+}
+func (m *MountsRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MountsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MountsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MountsRequest proto.InternalMessageInfo
 
 type MountsResponse struct {
-	Mounts []*containerd_types.Mount `protobuf:"bytes,1,rep,name=mounts" json:"mounts,omitempty"`
+	Mounts               []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
 }
 
-func (m *MountsResponse) Reset()                    { *m = MountsResponse{} }
-func (*MountsResponse) ProtoMessage()               {}
-func (*MountsResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{5} }
+func (m *MountsResponse) Reset()      { *m = MountsResponse{} }
+func (*MountsResponse) ProtoMessage() {}
+func (*MountsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{5}
+}
+func (m *MountsResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MountsResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MountsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MountsResponse.Merge(m, src)
+}
+func (m *MountsResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MountsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MountsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MountsResponse proto.InternalMessageInfo
 
 type RemoveSnapshotRequest struct {
-	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
-	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Snapshotter          string   `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key                  string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *RemoveSnapshotRequest) Reset()                    { *m = RemoveSnapshotRequest{} }
-func (*RemoveSnapshotRequest) ProtoMessage()               {}
-func (*RemoveSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{6} }
+func (m *RemoveSnapshotRequest) Reset()      { *m = RemoveSnapshotRequest{} }
+func (*RemoveSnapshotRequest) ProtoMessage() {}
+func (*RemoveSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{6}
+}
+func (m *RemoveSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RemoveSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RemoveSnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RemoveSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RemoveSnapshotRequest.Merge(m, src)
+}
+func (m *RemoveSnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *RemoveSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_RemoveSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RemoveSnapshotRequest proto.InternalMessageInfo
 
 type CommitSnapshotRequest struct {
 	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
@@ -167,117 +356,432 @@
 	// Labels are arbitrary data on snapshots.
 	//
 	// The combined size of a key/value pair cannot exceed 4096 bytes.
-	Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels               map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *CommitSnapshotRequest) Reset()                    { *m = CommitSnapshotRequest{} }
-func (*CommitSnapshotRequest) ProtoMessage()               {}
-func (*CommitSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{7} }
+func (m *CommitSnapshotRequest) Reset()      { *m = CommitSnapshotRequest{} }
+func (*CommitSnapshotRequest) ProtoMessage() {}
+func (*CommitSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{7}
+}
+func (m *CommitSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CommitSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CommitSnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CommitSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CommitSnapshotRequest.Merge(m, src)
+}
+func (m *CommitSnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CommitSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CommitSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommitSnapshotRequest proto.InternalMessageInfo
 
 type StatSnapshotRequest struct {
-	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
-	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Snapshotter          string   `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key                  string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StatSnapshotRequest) Reset()                    { *m = StatSnapshotRequest{} }
-func (*StatSnapshotRequest) ProtoMessage()               {}
-func (*StatSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{8} }
+func (m *StatSnapshotRequest) Reset()      { *m = StatSnapshotRequest{} }
+func (*StatSnapshotRequest) ProtoMessage() {}
+func (*StatSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{8}
+}
+func (m *StatSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatSnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatSnapshotRequest.Merge(m, src)
+}
+func (m *StatSnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatSnapshotRequest proto.InternalMessageInfo
 
 type Info struct {
 	Name   string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"`
 	Kind   Kind   `protobuf:"varint,3,opt,name=kind,proto3,enum=containerd.services.snapshots.v1.Kind" json:"kind,omitempty"`
 	// CreatedAt provides the time at which the snapshot was created.
-	CreatedAt time.Time `protobuf:"bytes,4,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	CreatedAt time.Time `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"`
 	// UpdatedAt provides the time the info was last updated.
-	UpdatedAt time.Time `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	UpdatedAt time.Time `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"`
 	// Labels are arbitrary data on snapshots.
 	//
 	// The combined size of a key/value pair cannot exceed 4096 bytes.
-	Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Labels               map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *Info) Reset()                    { *m = Info{} }
-func (*Info) ProtoMessage()               {}
-func (*Info) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{9} }
+func (m *Info) Reset()      { *m = Info{} }
+func (*Info) ProtoMessage() {}
+func (*Info) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{9}
+}
+func (m *Info) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Info.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Info) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Info.Merge(m, src)
+}
+func (m *Info) XXX_Size() int {
+	return m.Size()
+}
+func (m *Info) XXX_DiscardUnknown() {
+	xxx_messageInfo_Info.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Info proto.InternalMessageInfo
 
 type StatSnapshotResponse struct {
-	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+	Info                 Info     `protobuf:"bytes,1,opt,name=info,proto3" json:"info"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StatSnapshotResponse) Reset()                    { *m = StatSnapshotResponse{} }
-func (*StatSnapshotResponse) ProtoMessage()               {}
-func (*StatSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{10} }
+func (m *StatSnapshotResponse) Reset()      { *m = StatSnapshotResponse{} }
+func (*StatSnapshotResponse) ProtoMessage() {}
+func (*StatSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{10}
+}
+func (m *StatSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatSnapshotResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatSnapshotResponse.Merge(m, src)
+}
+func (m *StatSnapshotResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatSnapshotResponse proto.InternalMessageInfo
 
 type UpdateSnapshotRequest struct {
 	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
-	Info        Info   `protobuf:"bytes,2,opt,name=info" json:"info"`
+	Info        Info   `protobuf:"bytes,2,opt,name=info,proto3" json:"info"`
 	// UpdateMask specifies which fields to perform the update on. If empty,
 	// the operation applies to all fields.
 	//
 	// In info, Name, Parent, Kind, Created are immutable,
 	// other field may be updated using this mask.
 	// If no mask is provided, all mutable field are updated.
-	UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+	UpdateMask           *types1.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *UpdateSnapshotRequest) Reset()                    { *m = UpdateSnapshotRequest{} }
-func (*UpdateSnapshotRequest) ProtoMessage()               {}
-func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{11} }
+func (m *UpdateSnapshotRequest) Reset()      { *m = UpdateSnapshotRequest{} }
+func (*UpdateSnapshotRequest) ProtoMessage() {}
+func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{11}
+}
+func (m *UpdateSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateSnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateSnapshotRequest.Merge(m, src)
+}
+func (m *UpdateSnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateSnapshotRequest proto.InternalMessageInfo
 
 type UpdateSnapshotResponse struct {
-	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+	Info                 Info     `protobuf:"bytes,1,opt,name=info,proto3" json:"info"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *UpdateSnapshotResponse) Reset()                    { *m = UpdateSnapshotResponse{} }
-func (*UpdateSnapshotResponse) ProtoMessage()               {}
-func (*UpdateSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{12} }
+func (m *UpdateSnapshotResponse) Reset()      { *m = UpdateSnapshotResponse{} }
+func (*UpdateSnapshotResponse) ProtoMessage() {}
+func (*UpdateSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{12}
+}
+func (m *UpdateSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateSnapshotResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateSnapshotResponse.Merge(m, src)
+}
+func (m *UpdateSnapshotResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateSnapshotResponse proto.InternalMessageInfo
 
 type ListSnapshotsRequest struct {
-	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Snapshotter          string   `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListSnapshotsRequest) Reset()                    { *m = ListSnapshotsRequest{} }
-func (*ListSnapshotsRequest) ProtoMessage()               {}
-func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{13} }
+func (m *ListSnapshotsRequest) Reset()      { *m = ListSnapshotsRequest{} }
+func (*ListSnapshotsRequest) ProtoMessage() {}
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{13}
+}
+func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsRequest.Merge(m, src)
+}
+func (m *ListSnapshotsRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListSnapshotsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo
 
 type ListSnapshotsResponse struct {
-	Info []Info `protobuf:"bytes,1,rep,name=info" json:"info"`
+	Info                 []Info   `protobuf:"bytes,1,rep,name=info,proto3" json:"info"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListSnapshotsResponse) Reset()                    { *m = ListSnapshotsResponse{} }
-func (*ListSnapshotsResponse) ProtoMessage()               {}
-func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{14} }
+func (m *ListSnapshotsResponse) Reset()      { *m = ListSnapshotsResponse{} }
+func (*ListSnapshotsResponse) ProtoMessage() {}
+func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{14}
+}
+func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsResponse.Merge(m, src)
+}
+func (m *ListSnapshotsResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListSnapshotsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo
 
 type UsageRequest struct {
-	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
-	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Snapshotter          string   `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key                  string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *UsageRequest) Reset()                    { *m = UsageRequest{} }
-func (*UsageRequest) ProtoMessage()               {}
-func (*UsageRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{15} }
+func (m *UsageRequest) Reset()      { *m = UsageRequest{} }
+func (*UsageRequest) ProtoMessage() {}
+func (*UsageRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{15}
+}
+func (m *UsageRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UsageRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UsageRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UsageRequest.Merge(m, src)
+}
+func (m *UsageRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UsageRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UsageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UsageRequest proto.InternalMessageInfo
 
 type UsageResponse struct {
-	Size_  int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"`
-	Inodes int64 `protobuf:"varint,2,opt,name=inodes,proto3" json:"inodes,omitempty"`
+	Size_                int64    `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"`
+	Inodes               int64    `protobuf:"varint,2,opt,name=inodes,proto3" json:"inodes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *UsageResponse) Reset()                    { *m = UsageResponse{} }
-func (*UsageResponse) ProtoMessage()               {}
-func (*UsageResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{16} }
+func (m *UsageResponse) Reset()      { *m = UsageResponse{} }
+func (*UsageResponse) ProtoMessage() {}
+func (*UsageResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cfc0ddf12791f168, []int{16}
+}
+func (m *UsageResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UsageResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UsageResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UsageResponse.Merge(m, src)
+}
+func (m *UsageResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *UsageResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_UsageResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UsageResponse proto.InternalMessageInfo
 
 func init() {
+	proto.RegisterEnum("containerd.services.snapshots.v1.Kind", Kind_name, Kind_value)
 	proto.RegisterType((*PrepareSnapshotRequest)(nil), "containerd.services.snapshots.v1.PrepareSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.PrepareSnapshotRequest.LabelsEntry")
 	proto.RegisterType((*PrepareSnapshotResponse)(nil), "containerd.services.snapshots.v1.PrepareSnapshotResponse")
 	proto.RegisterType((*ViewSnapshotRequest)(nil), "containerd.services.snapshots.v1.ViewSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.ViewSnapshotRequest.LabelsEntry")
 	proto.RegisterType((*ViewSnapshotResponse)(nil), "containerd.services.snapshots.v1.ViewSnapshotResponse")
 	proto.RegisterType((*MountsRequest)(nil), "containerd.services.snapshots.v1.MountsRequest")
 	proto.RegisterType((*MountsResponse)(nil), "containerd.services.snapshots.v1.MountsResponse")
 	proto.RegisterType((*RemoveSnapshotRequest)(nil), "containerd.services.snapshots.v1.RemoveSnapshotRequest")
 	proto.RegisterType((*CommitSnapshotRequest)(nil), "containerd.services.snapshots.v1.CommitSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.CommitSnapshotRequest.LabelsEntry")
 	proto.RegisterType((*StatSnapshotRequest)(nil), "containerd.services.snapshots.v1.StatSnapshotRequest")
 	proto.RegisterType((*Info)(nil), "containerd.services.snapshots.v1.Info")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.Info.LabelsEntry")
 	proto.RegisterType((*StatSnapshotResponse)(nil), "containerd.services.snapshots.v1.StatSnapshotResponse")
 	proto.RegisterType((*UpdateSnapshotRequest)(nil), "containerd.services.snapshots.v1.UpdateSnapshotRequest")
 	proto.RegisterType((*UpdateSnapshotResponse)(nil), "containerd.services.snapshots.v1.UpdateSnapshotResponse")
@@ -285,7 +789,77 @@
 	proto.RegisterType((*ListSnapshotsResponse)(nil), "containerd.services.snapshots.v1.ListSnapshotsResponse")
 	proto.RegisterType((*UsageRequest)(nil), "containerd.services.snapshots.v1.UsageRequest")
 	proto.RegisterType((*UsageResponse)(nil), "containerd.services.snapshots.v1.UsageResponse")
-	proto.RegisterEnum("containerd.services.snapshots.v1.Kind", Kind_name, Kind_value)
+}
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", fileDescriptor_cfc0ddf12791f168)
+}
+
+var fileDescriptor_cfc0ddf12791f168 = []byte{
+	// 1007 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1a, 0x47,
+	0x14, 0x67, 0x60, 0x8d, 0xe3, 0x87, 0xed, 0xd2, 0x09, 0x26, 0x68, 0x5b, 0xe1, 0x15, 0x87, 0xca,
+	0xea, 0x61, 0x37, 0xa1, 0x6a, 0xe2, 0xc4, 0x97, 0x62, 0x4c, 0x2b, 0xec, 0xd8, 0xa9, 0x36, 0xb6,
+	0x13, 0xa7, 0x55, 0xa3, 0x35, 0x8c, 0xf1, 0x0a, 0x76, 0x97, 0x32, 0x03, 0x11, 0xad, 0x54, 0xf5,
+	0x18, 0xf9, 0xd4, 0x2f, 0xe0, 0x53, 0xfb, 0x21, 0xaa, 0x7e, 0x02, 0x1f, 0x7b, 0xec, 0xa9, 0x6d,
+	0xfc, 0x25, 0x7a, 0xea, 0x1f, 0xcd, 0xec, 0x2c, 0x60, 0x4c, 0xc5, 0x82, 0xc9, 0x6d, 0x66, 0x67,
+	0x7e, 0xef, 0xfd, 0xe6, 0xf7, 0xe6, 0xbd, 0x37, 0x0b, 0xdb, 0x35, 0x9b, 0x9d, 0xb6, 0x8f, 0xf5,
+	0x8a, 0xe7, 0x18, 0x15, 0xcf, 0x65, 0x96, 0xed, 0x92, 0x56, 0x75, 0x70, 0x68, 0x35, 0x6d, 0x83,
+	0x92, 0x56, 0xc7, 0xae, 0x10, 0x6a, 0x50, 0xd7, 0x6a, 0xd2, 0x53, 0x8f, 0x51, 0xa3, 0x73, 0xaf,
+	0x3f, 0xd1, 0x9b, 0x2d, 0x8f, 0x79, 0x58, 0xeb, 0xa3, 0xf4, 0x00, 0xa1, 0xf7, 0x37, 0x75, 0xee,
+	0xa9, 0xa9, 0x9a, 0x57, 0xf3, 0xc4, 0x66, 0x83, 0x8f, 0x7c, 0x9c, 0xfa, 0x5e, 0xcd, 0xf3, 0x6a,
+	0x0d, 0x62, 0x88, 0xd9, 0x71, 0xfb, 0xc4, 0x20, 0x4e, 0x93, 0x75, 0xe5, 0xa2, 0x36, 0xbc, 0x78,
+	0x62, 0x93, 0x46, 0xf5, 0xa5, 0x63, 0xd1, 0xba, 0xdc, 0xb1, 0x3a, 0xbc, 0x83, 0xd9, 0x0e, 0xa1,
+	0xcc, 0x72, 0x9a, 0x72, 0xc3, 0xfd, 0x50, 0x67, 0x64, 0xdd, 0x26, 0xa1, 0x86, 0xe3, 0xb5, 0x5d,
+	0xe6, 0xe3, 0x72, 0x7f, 0x23, 0x48, 0x7f, 0xde, 0x22, 0x4d, 0xab, 0x45, 0x9e, 0xca, 0x53, 0x98,
+	0xe4, 0xeb, 0x36, 0xa1, 0x0c, 0x6b, 0x90, 0x08, 0x0e, 0xc6, 0x48, 0x2b, 0x83, 0x34, 0xb4, 0xb6,
+	0x60, 0x0e, 0x7e, 0xc2, 0x49, 0x88, 0xd5, 0x49, 0x37, 0x13, 0x15, 0x2b, 0x7c, 0x88, 0xd3, 0x10,
+	0xe7, 0xa6, 0x5c, 0x96, 0x89, 0x89, 0x8f, 0x72, 0x86, 0xbf, 0x84, 0x78, 0xc3, 0x3a, 0x26, 0x0d,
+	0x9a, 0x51, 0xb4, 0xd8, 0x5a, 0x22, 0xbf, 0xa5, 0x8f, 0xd3, 0x51, 0x1f, 0xcd, 0x4a, 0x7f, 0x2c,
+	0xcc, 0x94, 0x5c, 0xd6, 0xea, 0x9a, 0xd2, 0xa6, 0xfa, 0x10, 0x12, 0x03, 0x9f, 0x03, 0x5a, 0xa8,
+	0x4f, 0x2b, 0x05, 0x73, 0x1d, 0xab, 0xd1, 0x26, 0x92, 0xaa, 0x3f, 0x79, 0x14, 0x5d, 0x47, 0xb9,
+	0x6d, 0xb8, 0x73, 0xcd, 0x11, 0x6d, 0x7a, 0x2e, 0x25, 0xd8, 0x80, 0xb8, 0x50, 0x8a, 0x66, 0x90,
+	0xe0, 0x7c, 0x67, 0x90, 0xb3, 0x50, 0x52, 0xdf, 0xe5, 0xeb, 0xa6, 0xdc, 0x96, 0xfb, 0x0b, 0xc1,
+	0xed, 0x43, 0x9b, 0xbc, 0x7a, 0x9b, 0x42, 0x1e, 0x0d, 0x09, 0x59, 0x18, 0x2f, 0xe4, 0x08, 0x4a,
+	0xb3, 0x56, 0xf1, 0x33, 0x48, 0x5d, 0xf5, 0x32, 0xad, 0x84, 0x45, 0x58, 0x12, 0x1f, 0xe8, 0x0d,
+	0xb4, 0xcb, 0x15, 0x60, 0x39, 0x30, 0x32, 0x2d, 0x8f, 0x1d, 0x58, 0x31, 0x89, 0xe3, 0x75, 0x66,
+	0x91, 0x14, 0xfc, 0x5e, 0xac, 0x14, 0x3d, 0xc7, 0xb1, 0xd9, 0xe4, 0xd6, 0x30, 0x28, 0xae, 0xe5,
+	0x04, 0x92, 0x8b, 0x71, 0xe0, 0x21, 0xd6, 0x8f, 0xcc, 0x17, 0x43, 0xb7, 0xa2, 0x38, 0xfe, 0x56,
+	0x8c, 0x24, 0x34, 0xeb, 0x7b, 0x51, 0x86, 0xdb, 0x4f, 0x99, 0xc5, 0x66, 0x21, 0xe2, 0xbf, 0x51,
+	0x50, 0xca, 0xee, 0x89, 0xd7, 0x53, 0x04, 0x0d, 0x28, 0xd2, 0xcf, 0x96, 0xe8, 0x95, 0x6c, 0x79,
+	0x04, 0x4a, 0xdd, 0x76, 0xab, 0x42, 0xaa, 0xe5, 0xfc, 0x07, 0xe3, 0x55, 0xd9, 0xb1, 0xdd, 0xaa,
+	0x29, 0x30, 0xb8, 0x08, 0x50, 0x69, 0x11, 0x8b, 0x91, 0xea, 0x4b, 0x8b, 0x65, 0x14, 0x0d, 0xad,
+	0x25, 0xf2, 0xaa, 0xee, 0xd7, 0x61, 0x3d, 0xa8, 0xc3, 0xfa, 0x7e, 0x50, 0x87, 0x37, 0x6f, 0x5d,
+	0xfc, 0xbe, 0x1a, 0xf9, 0xe1, 0x8f, 0x55, 0x64, 0x2e, 0x48, 0x5c, 0x81, 0x71, 0x23, 0xed, 0x66,
+	0x35, 0x30, 0x32, 0x37, 0x89, 0x11, 0x89, 0x2b, 0x30, 0xbc, 0xdd, 0x8b, 0x6e, 0x5c, 0x44, 0x37,
+	0x3f, 0xfe, 0x1c, 0x5c, 0xa9, 0x59, 0x07, 0xf3, 0x39, 0xa4, 0xae, 0x06, 0x53, 0x26, 0xd7, 0x27,
+	0xa0, 0xd8, 0xee, 0x89, 0x27, 0x8c, 0x24, 0xc2, 0x88, 0xcc, 0xc9, 0x6d, 0x2a, 0xfc, 0xa4, 0xa6,
+	0x40, 0xe6, 0x7e, 0x46, 0xb0, 0x72, 0x20, 0x8e, 0x3b, 0xf9, 0x4d, 0x09, 0xbc, 0x47, 0xa7, 0xf5,
+	0x8e, 0x37, 0x20, 0xe1, 0x6b, 0x2d, 0x1a, 0xae, 0xb8, 0x2b, 0xa3, 0x82, 0xf4, 0x29, 0xef, 0xc9,
+	0xbb, 0x16, 0xad, 0x9b, 0x32, 0xa4, 0x7c, 0x9c, 0x7b, 0x01, 0xe9, 0x61, 0xe6, 0x33, 0x93, 0x65,
+	0x1d, 0x52, 0x8f, 0x6d, 0xda, 0x13, 0x3c, 0x7c, 0x4d, 0xcc, 0x1d, 0xc1, 0xca, 0x10, 0xf2, 0x1a,
+	0xa9, 0xd8, 0x94, 0xa4, 0x36, 0x61, 0xf1, 0x80, 0x5a, 0x35, 0x72, 0x93, 0x5c, 0xde, 0x80, 0x25,
+	0x69, 0x43, 0xd2, 0xc2, 0xa0, 0x50, 0xfb, 0x1b, 0x3f, 0xa7, 0x63, 0xa6, 0x18, 0xf3, 0x9c, 0xb6,
+	0x5d, 0xaf, 0x4a, 0xa8, 0x40, 0xc6, 0x4c, 0x39, 0xfb, 0xf0, 0x35, 0x02, 0x85, 0xa7, 0x29, 0x7e,
+	0x1f, 0xe6, 0x0f, 0xf6, 0x76, 0xf6, 0x9e, 0x3c, 0xdb, 0x4b, 0x46, 0xd4, 0x77, 0xce, 0xce, 0xb5,
+	0x04, 0xff, 0x7c, 0xe0, 0xd6, 0x5d, 0xef, 0x95, 0x8b, 0xd3, 0xa0, 0x1c, 0x96, 0x4b, 0xcf, 0x92,
+	0x48, 0x5d, 0x3c, 0x3b, 0xd7, 0x6e, 0xf1, 0x25, 0xde, 0xa2, 0xb0, 0x0a, 0xf1, 0x42, 0x71, 0xbf,
+	0x7c, 0x58, 0x4a, 0x46, 0xd5, 0xe5, 0xb3, 0x73, 0x0d, 0xf8, 0x4a, 0xa1, 0xc2, 0xec, 0x0e, 0xc1,
+	0x1a, 0x2c, 0x14, 0x9f, 0xec, 0xee, 0x96, 0xf7, 0xf7, 0x4b, 0x5b, 0xc9, 0x98, 0xfa, 0xee, 0xd9,
+	0xb9, 0xb6, 0xc4, 0x97, 0xfd, 0x5a, 0xc9, 0x48, 0x55, 0x5d, 0x7c, 0xfd, 0x63, 0x36, 0xf2, 0xcb,
+	0x4f, 0x59, 0xc1, 0x20, 0xff, 0xcf, 0x3c, 0x2c, 0xf4, 0x34, 0xc6, 0xdf, 0xc1, 0xbc, 0x7c, 0x4a,
+	0xe0, 0xf5, 0x69, 0x9f, 0x37, 0xea, 0xc3, 0x29, 0x90, 0x52, 0xc4, 0x36, 0x28, 0xe2, 0x84, 0x1f,
+	0x4f, 0xf5, 0x24, 0x50, 0xef, 0x4f, 0x0a, 0x93, 0x6e, 0xeb, 0x10, 0xf7, 0xbb, 0x2d, 0x36, 0xc6,
+	0x5b, 0xb8, 0xd2, 0xdc, 0xd5, 0xbb, 0xe1, 0x01, 0xd2, 0xd9, 0x11, 0xc4, 0xfd, 0x60, 0xe0, 0x07,
+	0x53, 0xb6, 0x38, 0x35, 0x7d, 0x2d, 0xb3, 0x4b, 0xfc, 0x29, 0xce, 0x4d, 0xfb, 0x2d, 0x3f, 0x8c,
+	0xe9, 0x91, 0x8f, 0x83, 0xff, 0x35, 0xdd, 0x06, 0x85, 0x57, 0xce, 0x30, 0x91, 0x19, 0xd1, 0x2e,
+	0xc3, 0x44, 0x66, 0x64, 0x61, 0xfe, 0x16, 0xe2, 0x7e, 0x6d, 0x0a, 0x73, 0xa2, 0x91, 0xf5, 0x57,
+	0x5d, 0x9f, 0x1c, 0x28, 0x9d, 0x77, 0x41, 0xe1, 0x25, 0x08, 0x87, 0x20, 0x3f, 0xaa, 0xc8, 0xa9,
+	0x0f, 0x26, 0xc6, 0xf9, 0x8e, 0xef, 0x22, 0x7c, 0x0a, 0x73, 0xa2, 0xbc, 0x60, 0x3d, 0x04, 0xfb,
+	0x81, 0x5a, 0xa6, 0x1a, 0xa1, 0xf7, 0xfb, 0xbe, 0x36, 0xbf, 0xba, 0x78, 0x93, 0x8d, 0xfc, 0xf6,
+	0x26, 0x1b, 0xf9, 0xfe, 0x32, 0x8b, 0x2e, 0x2e, 0xb3, 0xe8, 0xd7, 0xcb, 0x2c, 0xfa, 0xf3, 0x32,
+	0x8b, 0x5e, 0x6c, 0x4d, 0xff, 0xcf, 0xb9, 0xd1, 0x9b, 0x3c, 0x8f, 0x1c, 0xc7, 0xc5, 0x55, 0xfa,
+	0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xa0, 0xb2, 0xda, 0xc4, 0x0e, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -296,14 +870,15 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Snapshots service
-
+// SnapshotsClient is the client API for Snapshots service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type SnapshotsClient interface {
 	Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error)
 	View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error)
 	Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error)
-	Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error)
+	Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error)
 	Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error)
 	Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error)
 	List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error)
@@ -320,7 +895,7 @@
 
 func (c *snapshotsClient) Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) {
 	out := new(PrepareSnapshotResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Prepare", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Prepare", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -329,7 +904,7 @@
 
 func (c *snapshotsClient) View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) {
 	out := new(ViewSnapshotResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/View", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/View", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -338,25 +913,25 @@
 
 func (c *snapshotsClient) Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) {
 	out := new(MountsResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Mounts", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Mounts", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *snapshotsClient) Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Commit", in, out, c.cc, opts...)
+func (c *snapshotsClient) Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Commit", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *snapshotsClient) Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Remove", in, out, c.cc, opts...)
+func (c *snapshotsClient) Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Remove", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -365,7 +940,7 @@
 
 func (c *snapshotsClient) Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) {
 	out := new(StatSnapshotResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Stat", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Stat", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -374,7 +949,7 @@
 
 func (c *snapshotsClient) Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) {
 	out := new(UpdateSnapshotResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Update", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Update", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -382,7 +957,7 @@
 }
 
 func (c *snapshotsClient) List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_Snapshots_serviceDesc.Streams[0], c.cc, "/containerd.services.snapshots.v1.Snapshots/List", opts...)
+	stream, err := c.cc.NewStream(ctx, &_Snapshots_serviceDesc.Streams[0], "/containerd.services.snapshots.v1.Snapshots/List", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -415,21 +990,20 @@
 
 func (c *snapshotsClient) Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) {
 	out := new(UsageResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Usage", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Usage", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Snapshots service
-
+// SnapshotsServer is the server API for Snapshots service.
 type SnapshotsServer interface {
 	Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error)
 	View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error)
 	Mounts(context.Context, *MountsRequest) (*MountsResponse, error)
-	Commit(context.Context, *CommitSnapshotRequest) (*google_protobuf1.Empty, error)
-	Remove(context.Context, *RemoveSnapshotRequest) (*google_protobuf1.Empty, error)
+	Commit(context.Context, *CommitSnapshotRequest) (*types1.Empty, error)
+	Remove(context.Context, *RemoveSnapshotRequest) (*types1.Empty, error)
 	Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error)
 	Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error)
 	List(*ListSnapshotsRequest, Snapshots_ListServer) error
@@ -702,6 +1276,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -732,6 +1309,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -785,6 +1365,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -815,6 +1398,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -845,6 +1431,9 @@
 		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
 		i += copy(dAtA[i:], m.Key)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -875,6 +1464,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -905,6 +1497,9 @@
 		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
 		i += copy(dAtA[i:], m.Key)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -958,6 +1553,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -988,6 +1586,9 @@
 		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
 		i += copy(dAtA[i:], m.Key)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1025,16 +1626,16 @@
 	}
 	dAtA[i] = 0x22
 	i++
-	i = encodeVarintSnapshots(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt)))
-	n1, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	i = encodeVarintSnapshots(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n1
 	dAtA[i] = 0x2a
 	i++
-	i = encodeVarintSnapshots(dAtA, i, uint64(types.SizeOfStdTime(m.UpdatedAt)))
-	n2, err := types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	i = encodeVarintSnapshots(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -1056,6 +1657,9 @@
 			i += copy(dAtA[i:], v)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1082,6 +1686,9 @@
 		return 0, err
 	}
 	i += n3
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1124,6 +1731,9 @@
 		}
 		i += n5
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1150,6 +1760,9 @@
 		return 0, err
 	}
 	i += n6
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1174,6 +1787,9 @@
 		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
 		i += copy(dAtA[i:], m.Snapshotter)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1204,6 +1820,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1234,6 +1853,9 @@
 		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
 		i += copy(dAtA[i:], m.Key)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1262,6 +1884,9 @@
 		i++
 		i = encodeVarintSnapshots(dAtA, i, uint64(m.Inodes))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1275,6 +1900,9 @@
 	return offset + 1
 }
 func (m *PrepareSnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
@@ -1297,10 +1925,16 @@
 			n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *PrepareSnapshotResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Mounts) > 0 {
@@ -1309,10 +1943,16 @@
 			n += 1 + l + sovSnapshots(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ViewSnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
@@ -1335,10 +1975,16 @@
 			n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ViewSnapshotResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Mounts) > 0 {
@@ -1347,10 +1993,16 @@
 			n += 1 + l + sovSnapshots(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *MountsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
@@ -1361,10 +2013,16 @@
 	if l > 0 {
 		n += 1 + l + sovSnapshots(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *MountsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Mounts) > 0 {
@@ -1373,10 +2031,16 @@
 			n += 1 + l + sovSnapshots(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *RemoveSnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
@@ -1387,10 +2051,16 @@
 	if l > 0 {
 		n += 1 + l + sovSnapshots(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CommitSnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
@@ -1413,10 +2083,16 @@
 			n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StatSnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
@@ -1427,10 +2103,16 @@
 	if l > 0 {
 		n += 1 + l + sovSnapshots(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *Info) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Name)
@@ -1444,9 +2126,9 @@
 	if m.Kind != 0 {
 		n += 1 + sovSnapshots(uint64(m.Kind))
 	}
-	l = types.SizeOfStdTime(m.CreatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
 	n += 1 + l + sovSnapshots(uint64(l))
-	l = types.SizeOfStdTime(m.UpdatedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
 	n += 1 + l + sovSnapshots(uint64(l))
 	if len(m.Labels) > 0 {
 		for k, v := range m.Labels {
@@ -1456,18 +2138,30 @@
 			n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StatSnapshotResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Info.Size()
 	n += 1 + l + sovSnapshots(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateSnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
@@ -1480,28 +2174,46 @@
 		l = m.UpdateMask.Size()
 		n += 1 + l + sovSnapshots(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateSnapshotResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = m.Info.Size()
 	n += 1 + l + sovSnapshots(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListSnapshotsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
 	if l > 0 {
 		n += 1 + l + sovSnapshots(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListSnapshotsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Info) > 0 {
@@ -1510,10 +2222,16 @@
 			n += 1 + l + sovSnapshots(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UsageRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Snapshotter)
@@ -1524,10 +2242,16 @@
 	if l > 0 {
 		n += 1 + l + sovSnapshots(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UsageResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Size_ != 0 {
@@ -1536,6 +2260,9 @@
 	if m.Inodes != 0 {
 		n += 1 + sovSnapshots(uint64(m.Inodes))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -1560,7 +2287,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -1571,6 +2298,7 @@
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
 		`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1580,7 +2308,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&PrepareSnapshotResponse{`,
-		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1593,7 +2322,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -1604,6 +2333,7 @@
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
 		`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1613,7 +2343,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&ViewSnapshotResponse{`,
-		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1625,6 +2356,7 @@
 	s := strings.Join([]string{`&MountsRequest{`,
 		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1634,7 +2366,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&MountsResponse{`,
-		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1646,6 +2379,7 @@
 	s := strings.Join([]string{`&RemoveSnapshotRequest{`,
 		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1658,7 +2392,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -1669,6 +2403,7 @@
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1680,6 +2415,7 @@
 	s := strings.Join([]string{`&StatSnapshotRequest{`,
 		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1692,7 +2428,7 @@
 	for k, _ := range this.Labels {
 		keysForLabels = append(keysForLabels, k)
 	}
-	sortkeys.Strings(keysForLabels)
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
 	mapStringForLabels := "map[string]string{"
 	for _, k := range keysForLabels {
 		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -1702,9 +2438,10 @@
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
 		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
-		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
-		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
 		`Labels:` + mapStringForLabels + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1715,6 +2452,7 @@
 	}
 	s := strings.Join([]string{`&StatSnapshotResponse{`,
 		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1726,7 +2464,8 @@
 	s := strings.Join([]string{`&UpdateSnapshotRequest{`,
 		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
 		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
-		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf2.FieldMask", 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types1.FieldMask", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1737,6 +2476,7 @@
 	}
 	s := strings.Join([]string{`&UpdateSnapshotResponse{`,
 		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1747,6 +2487,7 @@
 	}
 	s := strings.Join([]string{`&ListSnapshotsRequest{`,
 		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1757,6 +2498,7 @@
 	}
 	s := strings.Join([]string{`&ListSnapshotsResponse{`,
 		`Info:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Info), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1768,6 +2510,7 @@
 	s := strings.Join([]string{`&UsageRequest{`,
 		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1779,6 +2522,7 @@
 	s := strings.Join([]string{`&UsageResponse{`,
 		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
 		`Inodes:` + fmt.Sprintf("%v", this.Inodes) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1806,7 +2550,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1834,7 +2578,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1844,6 +2588,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1863,7 +2610,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1873,6 +2620,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1892,7 +2642,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1902,6 +2652,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1921,7 +2674,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1930,6 +2683,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1950,7 +2706,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1967,7 +2723,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1977,6 +2733,9 @@
 						return ErrInvalidLengthSnapshots
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthSnapshots
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1993,7 +2752,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -2003,6 +2762,9 @@
 						return ErrInvalidLengthSnapshots
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthSnapshots
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -2034,9 +2796,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2061,7 +2827,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2089,7 +2855,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2098,10 +2864,13 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Mounts = append(m.Mounts, &containerd_types.Mount{})
+			m.Mounts = append(m.Mounts, &types.Mount{})
 			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -2115,9 +2884,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2142,7 +2915,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2170,7 +2943,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2180,6 +2953,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2199,7 +2975,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2209,6 +2985,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2228,7 +3007,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2238,6 +3017,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2257,7 +3039,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2266,6 +3048,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2286,7 +3071,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -2303,7 +3088,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -2313,6 +3098,9 @@
 						return ErrInvalidLengthSnapshots
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthSnapshots
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -2329,7 +3117,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -2339,6 +3127,9 @@
 						return ErrInvalidLengthSnapshots
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthSnapshots
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -2370,9 +3161,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2397,7 +3192,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2425,7 +3220,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2434,10 +3229,13 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Mounts = append(m.Mounts, &containerd_types.Mount{})
+			m.Mounts = append(m.Mounts, &types.Mount{})
 			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -2451,9 +3249,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2478,7 +3280,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2506,7 +3308,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2516,6 +3318,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2535,7 +3340,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2545,6 +3350,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2559,9 +3367,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2586,7 +3398,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2614,7 +3426,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2623,10 +3435,13 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Mounts = append(m.Mounts, &containerd_types.Mount{})
+			m.Mounts = append(m.Mounts, &types.Mount{})
 			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -2640,9 +3455,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2667,7 +3486,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2695,7 +3514,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2705,6 +3524,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2724,7 +3546,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2734,6 +3556,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2748,9 +3573,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2775,7 +3604,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2803,7 +3632,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2813,6 +3642,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2832,7 +3664,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2842,6 +3674,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2861,7 +3696,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2871,6 +3706,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2890,7 +3728,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2899,6 +3737,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2919,7 +3760,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -2936,7 +3777,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -2946,6 +3787,9 @@
 						return ErrInvalidLengthSnapshots
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthSnapshots
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -2962,7 +3806,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -2972,6 +3816,9 @@
 						return ErrInvalidLengthSnapshots
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthSnapshots
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -3003,9 +3850,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3030,7 +3881,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3058,7 +3909,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3068,6 +3919,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3087,7 +3941,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3097,6 +3951,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3111,9 +3968,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3138,7 +3999,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3166,7 +4027,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3176,6 +4037,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3195,7 +4059,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3205,6 +4069,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3224,7 +4091,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Kind |= (Kind(b) & 0x7F) << shift
+				m.Kind |= Kind(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3243,7 +4110,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3252,10 +4119,13 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -3273,7 +4143,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3282,10 +4152,13 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -3303,7 +4176,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3312,6 +4185,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3332,7 +4208,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -3349,7 +4225,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -3359,6 +4235,9 @@
 						return ErrInvalidLengthSnapshots
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthSnapshots
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -3375,7 +4254,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						stringLenmapvalue |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -3385,6 +4264,9 @@
 						return ErrInvalidLengthSnapshots
 					}
 					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthSnapshots
+					}
 					if postStringIndexmapvalue > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -3416,9 +4298,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3443,7 +4329,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3471,7 +4357,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3480,6 +4366,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3496,9 +4385,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3523,7 +4416,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3551,7 +4444,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3561,6 +4454,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3580,7 +4476,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3589,6 +4485,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3610,7 +4509,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3619,11 +4518,14 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.UpdateMask == nil {
-				m.UpdateMask = &google_protobuf2.FieldMask{}
+				m.UpdateMask = &types1.FieldMask{}
 			}
 			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -3638,9 +4540,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3665,7 +4571,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3693,7 +4599,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3702,6 +4608,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3718,9 +4627,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3745,7 +4658,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3773,7 +4686,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3783,6 +4696,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3797,9 +4713,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3824,7 +4744,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3852,7 +4772,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3861,6 +4781,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3878,9 +4801,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3905,7 +4832,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3933,7 +4860,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3943,6 +4870,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3962,7 +4892,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3972,6 +4902,9 @@
 				return ErrInvalidLengthSnapshots
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3986,9 +4919,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4013,7 +4950,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4041,7 +4978,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Size_ |= (int64(b) & 0x7F) << shift
+				m.Size_ |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4060,7 +4997,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Inodes |= (int64(b) & 0x7F) << shift
+				m.Inodes |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4074,9 +5011,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSnapshots
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSnapshots
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4140,10 +5081,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthSnapshots
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthSnapshots
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -4172,6 +5116,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthSnapshots
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -4190,74 +5137,3 @@
 	ErrInvalidLengthSnapshots = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowSnapshots   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", fileDescriptorSnapshots)
-}
-
-var fileDescriptorSnapshots = []byte{
-	// 1007 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1a, 0x47,
-	0x14, 0x67, 0x60, 0x8d, 0xe3, 0x87, 0xed, 0xd2, 0x09, 0x26, 0x68, 0x5b, 0xe1, 0x15, 0x87, 0xca,
-	0xea, 0x61, 0x37, 0xa1, 0x6a, 0xe2, 0xc4, 0x97, 0x62, 0x4c, 0x2b, 0xec, 0xd8, 0xa9, 0x36, 0xb6,
-	0x13, 0xa7, 0x55, 0xa3, 0x35, 0x8c, 0xf1, 0x0a, 0x76, 0x97, 0x32, 0x03, 0x11, 0xad, 0x54, 0xf5,
-	0x18, 0xf9, 0xd4, 0x2f, 0xe0, 0x53, 0xfb, 0x21, 0xaa, 0x7e, 0x02, 0x1f, 0x7b, 0xec, 0xa9, 0x6d,
-	0xfc, 0x25, 0x7a, 0xea, 0x1f, 0xcd, 0xec, 0x2c, 0x60, 0x4c, 0xc5, 0x82, 0xc9, 0x6d, 0x66, 0x67,
-	0x7e, 0xef, 0xfd, 0xe6, 0xf7, 0xe6, 0xbd, 0x37, 0x0b, 0xdb, 0x35, 0x9b, 0x9d, 0xb6, 0x8f, 0xf5,
-	0x8a, 0xe7, 0x18, 0x15, 0xcf, 0x65, 0x96, 0xed, 0x92, 0x56, 0x75, 0x70, 0x68, 0x35, 0x6d, 0x83,
-	0x92, 0x56, 0xc7, 0xae, 0x10, 0x6a, 0x50, 0xd7, 0x6a, 0xd2, 0x53, 0x8f, 0x51, 0xa3, 0x73, 0xaf,
-	0x3f, 0xd1, 0x9b, 0x2d, 0x8f, 0x79, 0x58, 0xeb, 0xa3, 0xf4, 0x00, 0xa1, 0xf7, 0x37, 0x75, 0xee,
-	0xa9, 0xa9, 0x9a, 0x57, 0xf3, 0xc4, 0x66, 0x83, 0x8f, 0x7c, 0x9c, 0xfa, 0x5e, 0xcd, 0xf3, 0x6a,
-	0x0d, 0x62, 0x88, 0xd9, 0x71, 0xfb, 0xc4, 0x20, 0x4e, 0x93, 0x75, 0xe5, 0xa2, 0x36, 0xbc, 0x78,
-	0x62, 0x93, 0x46, 0xf5, 0xa5, 0x63, 0xd1, 0xba, 0xdc, 0xb1, 0x3a, 0xbc, 0x83, 0xd9, 0x0e, 0xa1,
-	0xcc, 0x72, 0x9a, 0x72, 0xc3, 0xfd, 0x50, 0x67, 0x64, 0xdd, 0x26, 0xa1, 0x86, 0xe3, 0xb5, 0x5d,
-	0xe6, 0xe3, 0x72, 0x7f, 0x23, 0x48, 0x7f, 0xde, 0x22, 0x4d, 0xab, 0x45, 0x9e, 0xca, 0x53, 0x98,
-	0xe4, 0xeb, 0x36, 0xa1, 0x0c, 0x6b, 0x90, 0x08, 0x0e, 0xc6, 0x48, 0x2b, 0x83, 0x34, 0xb4, 0xb6,
-	0x60, 0x0e, 0x7e, 0xc2, 0x49, 0x88, 0xd5, 0x49, 0x37, 0x13, 0x15, 0x2b, 0x7c, 0x88, 0xd3, 0x10,
-	0xe7, 0xa6, 0x5c, 0x96, 0x89, 0x89, 0x8f, 0x72, 0x86, 0xbf, 0x84, 0x78, 0xc3, 0x3a, 0x26, 0x0d,
-	0x9a, 0x51, 0xb4, 0xd8, 0x5a, 0x22, 0xbf, 0xa5, 0x8f, 0xd3, 0x51, 0x1f, 0xcd, 0x4a, 0x7f, 0x2c,
-	0xcc, 0x94, 0x5c, 0xd6, 0xea, 0x9a, 0xd2, 0xa6, 0xfa, 0x10, 0x12, 0x03, 0x9f, 0x03, 0x5a, 0xa8,
-	0x4f, 0x2b, 0x05, 0x73, 0x1d, 0xab, 0xd1, 0x26, 0x92, 0xaa, 0x3f, 0x79, 0x14, 0x5d, 0x47, 0xb9,
-	0x6d, 0xb8, 0x73, 0xcd, 0x11, 0x6d, 0x7a, 0x2e, 0x25, 0xd8, 0x80, 0xb8, 0x50, 0x8a, 0x66, 0x90,
-	0xe0, 0x7c, 0x67, 0x90, 0xb3, 0x50, 0x52, 0xdf, 0xe5, 0xeb, 0xa6, 0xdc, 0x96, 0xfb, 0x0b, 0xc1,
-	0xed, 0x43, 0x9b, 0xbc, 0x7a, 0x9b, 0x42, 0x1e, 0x0d, 0x09, 0x59, 0x18, 0x2f, 0xe4, 0x08, 0x4a,
-	0xb3, 0x56, 0xf1, 0x33, 0x48, 0x5d, 0xf5, 0x32, 0xad, 0x84, 0x45, 0x58, 0x12, 0x1f, 0xe8, 0x0d,
-	0xb4, 0xcb, 0x15, 0x60, 0x39, 0x30, 0x32, 0x2d, 0x8f, 0x1d, 0x58, 0x31, 0x89, 0xe3, 0x75, 0x66,
-	0x91, 0x14, 0xfc, 0x5e, 0xac, 0x14, 0x3d, 0xc7, 0xb1, 0xd9, 0xe4, 0xd6, 0x30, 0x28, 0xae, 0xe5,
-	0x04, 0x92, 0x8b, 0x71, 0xe0, 0x21, 0xd6, 0x8f, 0xcc, 0x17, 0x43, 0xb7, 0xa2, 0x38, 0xfe, 0x56,
-	0x8c, 0x24, 0x34, 0xeb, 0x7b, 0x51, 0x86, 0xdb, 0x4f, 0x99, 0xc5, 0x66, 0x21, 0xe2, 0xbf, 0x51,
-	0x50, 0xca, 0xee, 0x89, 0xd7, 0x53, 0x04, 0x0d, 0x28, 0xd2, 0xcf, 0x96, 0xe8, 0x95, 0x6c, 0x79,
-	0x04, 0x4a, 0xdd, 0x76, 0xab, 0x42, 0xaa, 0xe5, 0xfc, 0x07, 0xe3, 0x55, 0xd9, 0xb1, 0xdd, 0xaa,
-	0x29, 0x30, 0xb8, 0x08, 0x50, 0x69, 0x11, 0x8b, 0x91, 0xea, 0x4b, 0x8b, 0x65, 0x14, 0x0d, 0xad,
-	0x25, 0xf2, 0xaa, 0xee, 0xd7, 0x61, 0x3d, 0xa8, 0xc3, 0xfa, 0x7e, 0x50, 0x87, 0x37, 0x6f, 0x5d,
-	0xfc, 0xbe, 0x1a, 0xf9, 0xe1, 0x8f, 0x55, 0x64, 0x2e, 0x48, 0x5c, 0x81, 0x71, 0x23, 0xed, 0x66,
-	0x35, 0x30, 0x32, 0x37, 0x89, 0x11, 0x89, 0x2b, 0x30, 0xbc, 0xdd, 0x8b, 0x6e, 0x5c, 0x44, 0x37,
-	0x3f, 0xfe, 0x1c, 0x5c, 0xa9, 0x59, 0x07, 0xf3, 0x39, 0xa4, 0xae, 0x06, 0x53, 0x26, 0xd7, 0x27,
-	0xa0, 0xd8, 0xee, 0x89, 0x27, 0x8c, 0x24, 0xc2, 0x88, 0xcc, 0xc9, 0x6d, 0x2a, 0xfc, 0xa4, 0xa6,
-	0x40, 0xe6, 0x7e, 0x46, 0xb0, 0x72, 0x20, 0x8e, 0x3b, 0xf9, 0x4d, 0x09, 0xbc, 0x47, 0xa7, 0xf5,
-	0x8e, 0x37, 0x20, 0xe1, 0x6b, 0x2d, 0x1a, 0xae, 0xb8, 0x2b, 0xa3, 0x82, 0xf4, 0x29, 0xef, 0xc9,
-	0xbb, 0x16, 0xad, 0x9b, 0x32, 0xa4, 0x7c, 0x9c, 0x7b, 0x01, 0xe9, 0x61, 0xe6, 0x33, 0x93, 0x65,
-	0x1d, 0x52, 0x8f, 0x6d, 0xda, 0x13, 0x3c, 0x7c, 0x4d, 0xcc, 0x1d, 0xc1, 0xca, 0x10, 0xf2, 0x1a,
-	0xa9, 0xd8, 0x94, 0xa4, 0x36, 0x61, 0xf1, 0x80, 0x5a, 0x35, 0x72, 0x93, 0x5c, 0xde, 0x80, 0x25,
-	0x69, 0x43, 0xd2, 0xc2, 0xa0, 0x50, 0xfb, 0x1b, 0x3f, 0xa7, 0x63, 0xa6, 0x18, 0xf3, 0x9c, 0xb6,
-	0x5d, 0xaf, 0x4a, 0xa8, 0x40, 0xc6, 0x4c, 0x39, 0xfb, 0xf0, 0x35, 0x02, 0x85, 0xa7, 0x29, 0x7e,
-	0x1f, 0xe6, 0x0f, 0xf6, 0x76, 0xf6, 0x9e, 0x3c, 0xdb, 0x4b, 0x46, 0xd4, 0x77, 0xce, 0xce, 0xb5,
-	0x04, 0xff, 0x7c, 0xe0, 0xd6, 0x5d, 0xef, 0x95, 0x8b, 0xd3, 0xa0, 0x1c, 0x96, 0x4b, 0xcf, 0x92,
-	0x48, 0x5d, 0x3c, 0x3b, 0xd7, 0x6e, 0xf1, 0x25, 0xde, 0xa2, 0xb0, 0x0a, 0xf1, 0x42, 0x71, 0xbf,
-	0x7c, 0x58, 0x4a, 0x46, 0xd5, 0xe5, 0xb3, 0x73, 0x0d, 0xf8, 0x4a, 0xa1, 0xc2, 0xec, 0x0e, 0xc1,
-	0x1a, 0x2c, 0x14, 0x9f, 0xec, 0xee, 0x96, 0xf7, 0xf7, 0x4b, 0x5b, 0xc9, 0x98, 0xfa, 0xee, 0xd9,
-	0xb9, 0xb6, 0xc4, 0x97, 0xfd, 0x5a, 0xc9, 0x48, 0x55, 0x5d, 0x7c, 0xfd, 0x63, 0x36, 0xf2, 0xcb,
-	0x4f, 0x59, 0xc1, 0x20, 0xff, 0xcf, 0x3c, 0x2c, 0xf4, 0x34, 0xc6, 0xdf, 0xc1, 0xbc, 0x7c, 0x4a,
-	0xe0, 0xf5, 0x69, 0x9f, 0x37, 0xea, 0xc3, 0x29, 0x90, 0x52, 0xc4, 0x36, 0x28, 0xe2, 0x84, 0x1f,
-	0x4f, 0xf5, 0x24, 0x50, 0xef, 0x4f, 0x0a, 0x93, 0x6e, 0xeb, 0x10, 0xf7, 0xbb, 0x2d, 0x36, 0xc6,
-	0x5b, 0xb8, 0xd2, 0xdc, 0xd5, 0xbb, 0xe1, 0x01, 0xd2, 0xd9, 0x11, 0xc4, 0xfd, 0x60, 0xe0, 0x07,
-	0x53, 0xb6, 0x38, 0x35, 0x7d, 0x2d, 0xb3, 0x4b, 0xfc, 0x29, 0xce, 0x4d, 0xfb, 0x2d, 0x3f, 0x8c,
-	0xe9, 0x91, 0x8f, 0x83, 0xff, 0x35, 0xdd, 0x06, 0x85, 0x57, 0xce, 0x30, 0x91, 0x19, 0xd1, 0x2e,
-	0xc3, 0x44, 0x66, 0x64, 0x61, 0xfe, 0x16, 0xe2, 0x7e, 0x6d, 0x0a, 0x73, 0xa2, 0x91, 0xf5, 0x57,
-	0x5d, 0x9f, 0x1c, 0x28, 0x9d, 0x77, 0x41, 0xe1, 0x25, 0x08, 0x87, 0x20, 0x3f, 0xaa, 0xc8, 0xa9,
-	0x0f, 0x26, 0xc6, 0xf9, 0x8e, 0xef, 0x22, 0x7c, 0x0a, 0x73, 0xa2, 0xbc, 0x60, 0x3d, 0x04, 0xfb,
-	0x81, 0x5a, 0xa6, 0x1a, 0xa1, 0xf7, 0xfb, 0xbe, 0x36, 0xbf, 0xba, 0x78, 0x93, 0x8d, 0xfc, 0xf6,
-	0x26, 0x1b, 0xf9, 0xfe, 0x32, 0x8b, 0x2e, 0x2e, 0xb3, 0xe8, 0xd7, 0xcb, 0x2c, 0xfa, 0xf3, 0x32,
-	0x8b, 0x5e, 0x6c, 0x4d, 0xff, 0xcf, 0xb9, 0xd1, 0x9b, 0x3c, 0x8f, 0x1c, 0xc7, 0xc5, 0x55, 0xfa,
-	0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xa0, 0xb2, 0xda, 0xc4, 0x0e, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
index 0dfee91..cadaf74 100644
--- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
@@ -1,68 +1,24 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
 
-/*
-	Package tasks is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
-
-	It has these top-level messages:
-		CreateTaskRequest
-		CreateTaskResponse
-		StartRequest
-		StartResponse
-		DeleteTaskRequest
-		DeleteResponse
-		DeleteProcessRequest
-		GetRequest
-		GetResponse
-		ListTasksRequest
-		ListTasksResponse
-		KillRequest
-		ExecProcessRequest
-		ExecProcessResponse
-		ResizePtyRequest
-		CloseIORequest
-		PauseTaskRequest
-		ResumeTaskRequest
-		ListPidsRequest
-		ListPidsResponse
-		CheckpointTaskRequest
-		CheckpointTaskResponse
-		UpdateTaskRequest
-		MetricsRequest
-		MetricsResponse
-		WaitRequest
-		WaitResponse
-*/
 package tasks
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/gogo/protobuf/types"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import containerd_types "github.com/containerd/containerd/api/types"
-import containerd_types1 "github.com/containerd/containerd/api/types"
-import containerd_types2 "github.com/containerd/containerd/api/types"
-import containerd_v1_types "github.com/containerd/containerd/api/types/task"
-import _ "github.com/gogo/protobuf/types"
-
-import time "time"
-import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	types "github.com/containerd/containerd/api/types"
+	task "github.com/containerd/containerd/api/types/task"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types1 "github.com/gogo/protobuf/types"
+	github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -84,116 +40,488 @@
 	// These are for mounts that cannot be performed in the user namespace.
 	// Typically, these mounts should be resolved from snapshots specified on
 	// the container object.
-	Rootfs     []*containerd_types.Mount     `protobuf:"bytes,3,rep,name=rootfs" json:"rootfs,omitempty"`
-	Stdin      string                        `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"`
-	Stdout     string                        `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"`
-	Stderr     string                        `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"`
-	Terminal   bool                          `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"`
-	Checkpoint *containerd_types2.Descriptor `protobuf:"bytes,8,opt,name=checkpoint" json:"checkpoint,omitempty"`
-	Options    *google_protobuf1.Any         `protobuf:"bytes,9,opt,name=options" json:"options,omitempty"`
+	Rootfs               []*types.Mount    `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"`
+	Stdin                string            `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout               string            `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr               string            `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal             bool              `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	Checkpoint           *types.Descriptor `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	Options              *types1.Any       `protobuf:"bytes,9,opt,name=options,proto3" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
 }
 
-func (m *CreateTaskRequest) Reset()                    { *m = CreateTaskRequest{} }
-func (*CreateTaskRequest) ProtoMessage()               {}
-func (*CreateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{0} }
+func (m *CreateTaskRequest) Reset()      { *m = CreateTaskRequest{} }
+func (*CreateTaskRequest) ProtoMessage() {}
+func (*CreateTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{0}
+}
+func (m *CreateTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateTaskRequest.Merge(m, src)
+}
+func (m *CreateTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateTaskRequest proto.InternalMessageInfo
 
 type CreateTaskResponse struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	Pid         uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Pid                  uint32   `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CreateTaskResponse) Reset()                    { *m = CreateTaskResponse{} }
-func (*CreateTaskResponse) ProtoMessage()               {}
-func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{1} }
+func (m *CreateTaskResponse) Reset()      { *m = CreateTaskResponse{} }
+func (*CreateTaskResponse) ProtoMessage() {}
+func (*CreateTaskResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{1}
+}
+func (m *CreateTaskResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateTaskResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateTaskResponse.Merge(m, src)
+}
+func (m *CreateTaskResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateTaskResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateTaskResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateTaskResponse proto.InternalMessageInfo
 
 type StartRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StartRequest) Reset()                    { *m = StartRequest{} }
-func (*StartRequest) ProtoMessage()               {}
-func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{2} }
+func (m *StartRequest) Reset()      { *m = StartRequest{} }
+func (*StartRequest) ProtoMessage() {}
+func (*StartRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{2}
+}
+func (m *StartRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StartRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StartRequest.Merge(m, src)
+}
+func (m *StartRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *StartRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StartRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartRequest proto.InternalMessageInfo
 
 type StartResponse struct {
-	Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+	Pid                  uint32   `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StartResponse) Reset()                    { *m = StartResponse{} }
-func (*StartResponse) ProtoMessage()               {}
-func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{3} }
+func (m *StartResponse) Reset()      { *m = StartResponse{} }
+func (*StartResponse) ProtoMessage() {}
+func (*StartResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{3}
+}
+func (m *StartResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StartResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StartResponse.Merge(m, src)
+}
+func (m *StartResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *StartResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StartResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartResponse proto.InternalMessageInfo
 
 type DeleteTaskRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *DeleteTaskRequest) Reset()                    { *m = DeleteTaskRequest{} }
-func (*DeleteTaskRequest) ProtoMessage()               {}
-func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{4} }
+func (m *DeleteTaskRequest) Reset()      { *m = DeleteTaskRequest{} }
+func (*DeleteTaskRequest) ProtoMessage() {}
+func (*DeleteTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{4}
+}
+func (m *DeleteTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteTaskRequest.Merge(m, src)
+}
+func (m *DeleteTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteTaskRequest proto.InternalMessageInfo
 
 type DeleteResponse struct {
-	ID         string    `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Pid        uint32    `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
-	ExitStatus uint32    `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
-	ExitedAt   time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+	ID                   string    `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Pid                  uint32    `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	ExitStatus           uint32    `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt             time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *DeleteResponse) Reset()                    { *m = DeleteResponse{} }
-func (*DeleteResponse) ProtoMessage()               {}
-func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{5} }
+func (m *DeleteResponse) Reset()      { *m = DeleteResponse{} }
+func (*DeleteResponse) ProtoMessage() {}
+func (*DeleteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{5}
+}
+func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteResponse.Merge(m, src)
+}
+func (m *DeleteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
 
 type DeleteProcessRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *DeleteProcessRequest) Reset()                    { *m = DeleteProcessRequest{} }
-func (*DeleteProcessRequest) ProtoMessage()               {}
-func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{6} }
+func (m *DeleteProcessRequest) Reset()      { *m = DeleteProcessRequest{} }
+func (*DeleteProcessRequest) ProtoMessage() {}
+func (*DeleteProcessRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{6}
+}
+func (m *DeleteProcessRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteProcessRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteProcessRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteProcessRequest.Merge(m, src)
+}
+func (m *DeleteProcessRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteProcessRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteProcessRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteProcessRequest proto.InternalMessageInfo
 
 type GetRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *GetRequest) Reset()                    { *m = GetRequest{} }
-func (*GetRequest) ProtoMessage()               {}
-func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{7} }
+func (m *GetRequest) Reset()      { *m = GetRequest{} }
+func (*GetRequest) ProtoMessage() {}
+func (*GetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{7}
+}
+func (m *GetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *GetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetRequest.Merge(m, src)
+}
+func (m *GetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetRequest proto.InternalMessageInfo
 
 type GetResponse struct {
-	Process *containerd_v1_types.Process `protobuf:"bytes,1,opt,name=process" json:"process,omitempty"`
+	Process              *task.Process `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
 }
 
-func (m *GetResponse) Reset()                    { *m = GetResponse{} }
-func (*GetResponse) ProtoMessage()               {}
-func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{8} }
+func (m *GetResponse) Reset()      { *m = GetResponse{} }
+func (*GetResponse) ProtoMessage() {}
+func (*GetResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{8}
+}
+func (m *GetResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *GetResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetResponse.Merge(m, src)
+}
+func (m *GetResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetResponse proto.InternalMessageInfo
 
 type ListTasksRequest struct {
-	Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+	Filter               string   `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListTasksRequest) Reset()                    { *m = ListTasksRequest{} }
-func (*ListTasksRequest) ProtoMessage()               {}
-func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{9} }
+func (m *ListTasksRequest) Reset()      { *m = ListTasksRequest{} }
+func (*ListTasksRequest) ProtoMessage() {}
+func (*ListTasksRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{9}
+}
+func (m *ListTasksRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListTasksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListTasksRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListTasksRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListTasksRequest.Merge(m, src)
+}
+func (m *ListTasksRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListTasksRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListTasksRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListTasksRequest proto.InternalMessageInfo
 
 type ListTasksResponse struct {
-	Tasks []*containerd_v1_types.Process `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"`
+	Tasks                []*task.Process `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
 }
 
-func (m *ListTasksResponse) Reset()                    { *m = ListTasksResponse{} }
-func (*ListTasksResponse) ProtoMessage()               {}
-func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{10} }
+func (m *ListTasksResponse) Reset()      { *m = ListTasksResponse{} }
+func (*ListTasksResponse) ProtoMessage() {}
+func (*ListTasksResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{10}
+}
+func (m *ListTasksResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListTasksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListTasksResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListTasksResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListTasksResponse.Merge(m, src)
+}
+func (m *ListTasksResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListTasksResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListTasksResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListTasksResponse proto.InternalMessageInfo
 
 type KillRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
-	Signal      uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"`
-	All         bool   `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	Signal               uint32   `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"`
+	All                  bool     `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *KillRequest) Reset()                    { *m = KillRequest{} }
-func (*KillRequest) ProtoMessage()               {}
-func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{11} }
+func (m *KillRequest) Reset()      { *m = KillRequest{} }
+func (*KillRequest) ProtoMessage() {}
+func (*KillRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{11}
+}
+func (m *KillRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *KillRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KillRequest.Merge(m, src)
+}
+func (m *KillRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *KillRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_KillRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KillRequest proto.InternalMessageInfo
 
 type ExecProcessRequest struct {
 	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
@@ -204,136 +532,601 @@
 	// Spec for starting a process in the target container.
 	//
 	// For runc, this is a process spec, for example.
-	Spec *google_protobuf1.Any `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"`
+	Spec *types1.Any `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"`
 	// id of the exec process
-	ExecID string `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ExecProcessRequest) Reset()                    { *m = ExecProcessRequest{} }
-func (*ExecProcessRequest) ProtoMessage()               {}
-func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{12} }
+func (m *ExecProcessRequest) Reset()      { *m = ExecProcessRequest{} }
+func (*ExecProcessRequest) ProtoMessage() {}
+func (*ExecProcessRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{12}
+}
+func (m *ExecProcessRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ExecProcessRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExecProcessRequest.Merge(m, src)
+}
+func (m *ExecProcessRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExecProcessRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExecProcessRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExecProcessRequest proto.InternalMessageInfo
 
 type ExecProcessResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ExecProcessResponse) Reset()                    { *m = ExecProcessResponse{} }
-func (*ExecProcessResponse) ProtoMessage()               {}
-func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{13} }
+func (m *ExecProcessResponse) Reset()      { *m = ExecProcessResponse{} }
+func (*ExecProcessResponse) ProtoMessage() {}
+func (*ExecProcessResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{13}
+}
+func (m *ExecProcessResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ExecProcessResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExecProcessResponse.Merge(m, src)
+}
+func (m *ExecProcessResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExecProcessResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExecProcessResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExecProcessResponse proto.InternalMessageInfo
 
 type ResizePtyRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
-	Width       uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"`
-	Height      uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	Width                uint32   `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"`
+	Height               uint32   `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ResizePtyRequest) Reset()                    { *m = ResizePtyRequest{} }
-func (*ResizePtyRequest) ProtoMessage()               {}
-func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{14} }
+func (m *ResizePtyRequest) Reset()      { *m = ResizePtyRequest{} }
+func (*ResizePtyRequest) ProtoMessage() {}
+func (*ResizePtyRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{14}
+}
+func (m *ResizePtyRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResizePtyRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResizePtyRequest.Merge(m, src)
+}
+func (m *ResizePtyRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResizePtyRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResizePtyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResizePtyRequest proto.InternalMessageInfo
 
 type CloseIORequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
-	Stdin       bool   `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	Stdin                bool     `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CloseIORequest) Reset()                    { *m = CloseIORequest{} }
-func (*CloseIORequest) ProtoMessage()               {}
-func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{15} }
+func (m *CloseIORequest) Reset()      { *m = CloseIORequest{} }
+func (*CloseIORequest) ProtoMessage() {}
+func (*CloseIORequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{15}
+}
+func (m *CloseIORequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CloseIORequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CloseIORequest.Merge(m, src)
+}
+func (m *CloseIORequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CloseIORequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CloseIORequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloseIORequest proto.InternalMessageInfo
 
 type PauseTaskRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *PauseTaskRequest) Reset()                    { *m = PauseTaskRequest{} }
-func (*PauseTaskRequest) ProtoMessage()               {}
-func (*PauseTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{16} }
+func (m *PauseTaskRequest) Reset()      { *m = PauseTaskRequest{} }
+func (*PauseTaskRequest) ProtoMessage() {}
+func (*PauseTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{16}
+}
+func (m *PauseTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PauseTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PauseTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PauseTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PauseTaskRequest.Merge(m, src)
+}
+func (m *PauseTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PauseTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PauseTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PauseTaskRequest proto.InternalMessageInfo
 
 type ResumeTaskRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ResumeTaskRequest) Reset()                    { *m = ResumeTaskRequest{} }
-func (*ResumeTaskRequest) ProtoMessage()               {}
-func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{17} }
+func (m *ResumeTaskRequest) Reset()      { *m = ResumeTaskRequest{} }
+func (*ResumeTaskRequest) ProtoMessage() {}
+func (*ResumeTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{17}
+}
+func (m *ResumeTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResumeTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResumeTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResumeTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResumeTaskRequest.Merge(m, src)
+}
+func (m *ResumeTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResumeTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResumeTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResumeTaskRequest proto.InternalMessageInfo
 
 type ListPidsRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListPidsRequest) Reset()                    { *m = ListPidsRequest{} }
-func (*ListPidsRequest) ProtoMessage()               {}
-func (*ListPidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{18} }
+func (m *ListPidsRequest) Reset()      { *m = ListPidsRequest{} }
+func (*ListPidsRequest) ProtoMessage() {}
+func (*ListPidsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{18}
+}
+func (m *ListPidsRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListPidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListPidsRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListPidsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListPidsRequest.Merge(m, src)
+}
+func (m *ListPidsRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListPidsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListPidsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListPidsRequest proto.InternalMessageInfo
 
 type ListPidsResponse struct {
 	// Processes includes the process ID and additional process information
-	Processes []*containerd_v1_types.ProcessInfo `protobuf:"bytes,1,rep,name=processes" json:"processes,omitempty"`
+	Processes            []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
 }
 
-func (m *ListPidsResponse) Reset()                    { *m = ListPidsResponse{} }
-func (*ListPidsResponse) ProtoMessage()               {}
-func (*ListPidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{19} }
+func (m *ListPidsResponse) Reset()      { *m = ListPidsResponse{} }
+func (*ListPidsResponse) ProtoMessage() {}
+func (*ListPidsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{19}
+}
+func (m *ListPidsResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListPidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListPidsResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListPidsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListPidsResponse.Merge(m, src)
+}
+func (m *ListPidsResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListPidsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListPidsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListPidsResponse proto.InternalMessageInfo
 
 type CheckpointTaskRequest struct {
-	ContainerID      string                                     `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ParentCheckpoint github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"parent_checkpoint"`
-	Options          *google_protobuf1.Any                      `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	ContainerID          string                                     `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ParentCheckpoint     github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"parent_checkpoint"`
+	Options              *types1.Any                                `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
+	XXX_unrecognized     []byte                                     `json:"-"`
+	XXX_sizecache        int32                                      `json:"-"`
 }
 
-func (m *CheckpointTaskRequest) Reset()                    { *m = CheckpointTaskRequest{} }
-func (*CheckpointTaskRequest) ProtoMessage()               {}
-func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{20} }
+func (m *CheckpointTaskRequest) Reset()      { *m = CheckpointTaskRequest{} }
+func (*CheckpointTaskRequest) ProtoMessage() {}
+func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{20}
+}
+func (m *CheckpointTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CheckpointTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CheckpointTaskRequest.Merge(m, src)
+}
+func (m *CheckpointTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CheckpointTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CheckpointTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CheckpointTaskRequest proto.InternalMessageInfo
 
 type CheckpointTaskResponse struct {
-	Descriptors []*containerd_types2.Descriptor `protobuf:"bytes,1,rep,name=descriptors" json:"descriptors,omitempty"`
+	Descriptors          []*types.Descriptor `protobuf:"bytes,1,rep,name=descriptors,proto3" json:"descriptors,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
 }
 
-func (m *CheckpointTaskResponse) Reset()                    { *m = CheckpointTaskResponse{} }
-func (*CheckpointTaskResponse) ProtoMessage()               {}
-func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{21} }
+func (m *CheckpointTaskResponse) Reset()      { *m = CheckpointTaskResponse{} }
+func (*CheckpointTaskResponse) ProtoMessage() {}
+func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{21}
+}
+func (m *CheckpointTaskResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CheckpointTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CheckpointTaskResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CheckpointTaskResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CheckpointTaskResponse.Merge(m, src)
+}
+func (m *CheckpointTaskResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CheckpointTaskResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CheckpointTaskResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CheckpointTaskResponse proto.InternalMessageInfo
 
 type UpdateTaskRequest struct {
-	ContainerID string                `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	Resources   *google_protobuf1.Any `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"`
+	ContainerID          string      `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Resources            *types1.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
-func (m *UpdateTaskRequest) Reset()                    { *m = UpdateTaskRequest{} }
-func (*UpdateTaskRequest) ProtoMessage()               {}
-func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{22} }
+func (m *UpdateTaskRequest) Reset()      { *m = UpdateTaskRequest{} }
+func (*UpdateTaskRequest) ProtoMessage() {}
+func (*UpdateTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{22}
+}
+func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateTaskRequest.Merge(m, src)
+}
+func (m *UpdateTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo
 
 type MetricsRequest struct {
-	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+	Filters              []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *MetricsRequest) Reset()                    { *m = MetricsRequest{} }
-func (*MetricsRequest) ProtoMessage()               {}
-func (*MetricsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{23} }
+func (m *MetricsRequest) Reset()      { *m = MetricsRequest{} }
+func (*MetricsRequest) ProtoMessage() {}
+func (*MetricsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{23}
+}
+func (m *MetricsRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MetricsRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MetricsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricsRequest.Merge(m, src)
+}
+func (m *MetricsRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MetricsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricsRequest proto.InternalMessageInfo
 
 type MetricsResponse struct {
-	Metrics []*containerd_types1.Metric `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"`
+	Metrics              []*types.Metric `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
 }
 
-func (m *MetricsResponse) Reset()                    { *m = MetricsResponse{} }
-func (*MetricsResponse) ProtoMessage()               {}
-func (*MetricsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{24} }
+func (m *MetricsResponse) Reset()      { *m = MetricsResponse{} }
+func (*MetricsResponse) ProtoMessage() {}
+func (*MetricsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{24}
+}
+func (m *MetricsResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MetricsResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MetricsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricsResponse.Merge(m, src)
+}
+func (m *MetricsResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MetricsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricsResponse proto.InternalMessageInfo
 
 type WaitRequest struct {
-	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	ContainerID          string   `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *WaitRequest) Reset()                    { *m = WaitRequest{} }
-func (*WaitRequest) ProtoMessage()               {}
-func (*WaitRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{25} }
+func (m *WaitRequest) Reset()      { *m = WaitRequest{} }
+func (*WaitRequest) ProtoMessage() {}
+func (*WaitRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{25}
+}
+func (m *WaitRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WaitRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WaitRequest.Merge(m, src)
+}
+func (m *WaitRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WaitRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WaitRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WaitRequest proto.InternalMessageInfo
 
 type WaitResponse struct {
-	ExitStatus uint32    `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
-	ExitedAt   time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+	ExitStatus           uint32    `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt             time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *WaitResponse) Reset()                    { *m = WaitResponse{} }
-func (*WaitResponse) ProtoMessage()               {}
-func (*WaitResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{26} }
+func (m *WaitResponse) Reset()      { *m = WaitResponse{} }
+func (*WaitResponse) ProtoMessage() {}
+func (*WaitResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_310e7127b8a26f14, []int{26}
+}
+func (m *WaitResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WaitResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WaitResponse.Merge(m, src)
+}
+func (m *WaitResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *WaitResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_WaitResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WaitResponse proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*CreateTaskRequest)(nil), "containerd.services.tasks.v1.CreateTaskRequest")
@@ -365,6 +1158,97 @@
 	proto.RegisterType((*WaitResponse)(nil), "containerd.services.tasks.v1.WaitResponse")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", fileDescriptor_310e7127b8a26f14)
+}
+
+var fileDescriptor_310e7127b8a26f14 = []byte{
+	// 1318 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x45,
+	0x1c, 0xef, 0xfa, 0xed, 0xbf, 0x93, 0x36, 0x59, 0xd2, 0x60, 0x96, 0x2a, 0x0e, 0xcb, 0xc5, 0x04,
+	0xba, 0x4b, 0x5d, 0x54, 0x21, 0x5a, 0x21, 0x35, 0x0f, 0x22, 0x0b, 0xaa, 0xa6, 0xdb, 0x02, 0x55,
+	0x25, 0x14, 0xb6, 0xbb, 0x13, 0x67, 0x14, 0x7b, 0x67, 0xbb, 0x33, 0x4e, 0x1b, 0x38, 0xc0, 0x47,
+	0xe8, 0x95, 0x0b, 0x9f, 0x27, 0x47, 0x8e, 0x08, 0x55, 0x81, 0xfa, 0x5b, 0x70, 0x43, 0xf3, 0xd8,
+	0xcd, 0xc6, 0x8e, 0xbd, 0x4e, 0xd3, 0x70, 0x69, 0x67, 0x66, 0xff, 0xaf, 0xf9, 0xcd, 0xff, 0xf1,
+	0x73, 0x60, 0xb5, 0x83, 0xd9, 0x6e, 0xff, 0xa9, 0xe5, 0x91, 0x9e, 0xed, 0x91, 0x80, 0xb9, 0x38,
+	0x40, 0x91, 0x9f, 0x5e, 0xba, 0x21, 0xb6, 0x29, 0x8a, 0xf6, 0xb1, 0x87, 0xa8, 0xcd, 0x5c, 0xba,
+	0x47, 0xed, 0xfd, 0x1b, 0x72, 0x61, 0x85, 0x11, 0x61, 0x44, 0xbf, 0x76, 0x2c, 0x6d, 0xc5, 0x92,
+	0x96, 0x14, 0xd8, 0xbf, 0x61, 0xbc, 0xdf, 0x21, 0xa4, 0xd3, 0x45, 0xb6, 0x90, 0x7d, 0xda, 0xdf,
+	0xb1, 0x51, 0x2f, 0x64, 0x07, 0x52, 0xd5, 0x78, 0x6f, 0xf8, 0xa3, 0x1b, 0xc4, 0x9f, 0x16, 0x3a,
+	0xa4, 0x43, 0xc4, 0xd2, 0xe6, 0x2b, 0x75, 0x7a, 0x6b, 0xaa, 0x78, 0xd9, 0x41, 0x88, 0xa8, 0xdd,
+	0x23, 0xfd, 0x80, 0x29, 0xbd, 0xcf, 0xcf, 0xa2, 0x87, 0x58, 0x84, 0x3d, 0x75, 0x3b, 0xe3, 0xf6,
+	0x19, 0x34, 0x7d, 0x44, 0xbd, 0x08, 0x87, 0x8c, 0x44, 0x4a, 0xf9, 0x8b, 0x33, 0x28, 0x73, 0xc4,
+	0xc4, 0x3f, 0x4a, 0xb7, 0x31, 0x8c, 0x0d, 0xc3, 0x3d, 0x44, 0x99, 0xdb, 0x0b, 0xa5, 0x80, 0x79,
+	0x98, 0x83, 0xf9, 0xb5, 0x08, 0xb9, 0x0c, 0x3d, 0x72, 0xe9, 0x9e, 0x83, 0x9e, 0xf5, 0x11, 0x65,
+	0x7a, 0x0b, 0x66, 0x12, 0xf3, 0xdb, 0xd8, 0xaf, 0x6b, 0xcb, 0x5a, 0xb3, 0xba, 0x7a, 0x65, 0x70,
+	0xd4, 0xa8, 0xad, 0xc5, 0xe7, 0xed, 0x75, 0xa7, 0x96, 0x08, 0xb5, 0x7d, 0xdd, 0x86, 0x52, 0x44,
+	0x08, 0xdb, 0xa1, 0xf5, 0xfc, 0x72, 0xbe, 0x59, 0x6b, 0xbd, 0x6b, 0xa5, 0x9e, 0x54, 0x44, 0x67,
+	0xdd, 0xe3, 0x60, 0x3a, 0x4a, 0x4c, 0x5f, 0x80, 0x22, 0x65, 0x3e, 0x0e, 0xea, 0x05, 0x6e, 0xdd,
+	0x91, 0x1b, 0x7d, 0x11, 0x4a, 0x94, 0xf9, 0xa4, 0xcf, 0xea, 0x45, 0x71, 0xac, 0x76, 0xea, 0x1c,
+	0x45, 0x51, 0xbd, 0x94, 0x9c, 0xa3, 0x28, 0xd2, 0x0d, 0xa8, 0x30, 0x14, 0xf5, 0x70, 0xe0, 0x76,
+	0xeb, 0xe5, 0x65, 0xad, 0x59, 0x71, 0x92, 0xbd, 0x7e, 0x07, 0xc0, 0xdb, 0x45, 0xde, 0x5e, 0x48,
+	0x70, 0xc0, 0xea, 0x95, 0x65, 0xad, 0x59, 0x6b, 0x5d, 0x1b, 0x0d, 0x6b, 0x3d, 0x41, 0xdc, 0x49,
+	0xc9, 0xeb, 0x16, 0x94, 0x49, 0xc8, 0x30, 0x09, 0x68, 0xbd, 0x2a, 0x54, 0x17, 0x2c, 0x89, 0xa6,
+	0x15, 0xa3, 0x69, 0xdd, 0x0d, 0x0e, 0x9c, 0x58, 0xc8, 0x7c, 0x02, 0x7a, 0x1a, 0x49, 0x1a, 0x92,
+	0x80, 0xa2, 0x37, 0x82, 0x72, 0x0e, 0xf2, 0x21, 0xf6, 0xeb, 0xb9, 0x65, 0xad, 0x39, 0xeb, 0xf0,
+	0xa5, 0xd9, 0x81, 0x99, 0x87, 0xcc, 0x8d, 0xd8, 0x79, 0x1e, 0xe8, 0x43, 0x28, 0xa3, 0x17, 0xc8,
+	0xdb, 0x56, 0x96, 0xab, 0xab, 0x30, 0x38, 0x6a, 0x94, 0x36, 0x5e, 0x20, 0xaf, 0xbd, 0xee, 0x94,
+	0xf8, 0xa7, 0xb6, 0x6f, 0x7e, 0x00, 0xb3, 0xca, 0x91, 0x8a, 0x5f, 0xc5, 0xa2, 0x1d, 0xc7, 0xb2,
+	0x09, 0xf3, 0xeb, 0xa8, 0x8b, 0xce, 0x9d, 0x31, 0xe6, 0xef, 0x1a, 0x5c, 0x96, 0x96, 0x12, 0x6f,
+	0x8b, 0x90, 0x4b, 0x94, 0x4b, 0x83, 0xa3, 0x46, 0xae, 0xbd, 0xee, 0xe4, 0xf0, 0x29, 0x88, 0xe8,
+	0x0d, 0xa8, 0xa1, 0x17, 0x98, 0x6d, 0x53, 0xe6, 0xb2, 0x3e, 0xcf, 0x39, 0xfe, 0x05, 0xf8, 0xd1,
+	0x43, 0x71, 0xa2, 0xdf, 0x85, 0x2a, 0xdf, 0x21, 0x7f, 0xdb, 0x65, 0x22, 0xc5, 0x6a, 0x2d, 0x63,
+	0xe4, 0x01, 0x1f, 0xc5, 0xe5, 0xb0, 0x5a, 0x39, 0x3c, 0x6a, 0x5c, 0x7a, 0xf9, 0x77, 0x43, 0x73,
+	0x2a, 0x52, 0xed, 0x2e, 0x33, 0x09, 0x2c, 0xc8, 0xf8, 0xb6, 0x22, 0xe2, 0x21, 0x4a, 0x2f, 0x1c,
+	0x7d, 0x04, 0xb0, 0x89, 0x2e, 0xfe, 0x91, 0x37, 0xa0, 0x26, 0xdc, 0x28, 0xd0, 0x6f, 0x41, 0x39,
+	0x94, 0x17, 0x14, 0x2e, 0x86, 0x6a, 0x64, 0xff, 0x86, 0x2a, 0x93, 0x18, 0x84, 0x58, 0xd8, 0x5c,
+	0x81, 0xb9, 0x6f, 0x30, 0x65, 0x3c, 0x0d, 0x12, 0x68, 0x16, 0xa1, 0xb4, 0x83, 0xbb, 0x0c, 0x45,
+	0x32, 0x5a, 0x47, 0xed, 0x78, 0xd2, 0xa4, 0x64, 0x93, 0xda, 0x28, 0x8a, 0x16, 0x5f, 0xd7, 0x44,
+	0xc7, 0x98, 0xec, 0x56, 0x8a, 0x9a, 0x2f, 0x35, 0xa8, 0x7d, 0x8d, 0xbb, 0xdd, 0x8b, 0x06, 0x49,
+	0x34, 0x1c, 0xdc, 0xe1, 0x6d, 0x45, 0xe6, 0x96, 0xda, 0xf1, 0x54, 0x74, 0xbb, 0x5d, 0x91, 0x51,
+	0x15, 0x87, 0x2f, 0xcd, 0x7f, 0x35, 0xd0, 0xb9, 0xf2, 0x5b, 0xc8, 0x92, 0xa4, 0x27, 0xe6, 0x4e,
+	0xef, 0x89, 0xf9, 0x31, 0x3d, 0xb1, 0x30, 0xb6, 0x27, 0x16, 0x87, 0x7a, 0x62, 0x13, 0x0a, 0x34,
+	0x44, 0x9e, 0xe8, 0xa2, 0xe3, 0x5a, 0x9a, 0x90, 0x48, 0xa3, 0x54, 0x1e, 0x9b, 0x4a, 0x57, 0xe1,
+	0x9d, 0x13, 0x57, 0x97, 0x2f, 0x6b, 0xfe, 0xa6, 0xc1, 0x9c, 0x83, 0x28, 0xfe, 0x09, 0x6d, 0xb1,
+	0x83, 0x0b, 0x7f, 0xaa, 0x05, 0x28, 0x3e, 0xc7, 0x3e, 0xdb, 0x55, 0x2f, 0x25, 0x37, 0x1c, 0x9d,
+	0x5d, 0x84, 0x3b, 0xbb, 0xb2, 0xfa, 0x67, 0x1d, 0xb5, 0x33, 0x7f, 0x81, 0xcb, 0x6b, 0x5d, 0x42,
+	0x51, 0xfb, 0xfe, 0xff, 0x11, 0x98, 0x7c, 0xce, 0xbc, 0x78, 0x05, 0xb9, 0x31, 0xbf, 0x82, 0xb9,
+	0x2d, 0xb7, 0x4f, 0xcf, 0xdd, 0x3f, 0x37, 0x61, 0xde, 0x41, 0xb4, 0xdf, 0x3b, 0xb7, 0xa1, 0x0d,
+	0xb8, 0xc2, 0x8b, 0x73, 0x0b, 0xfb, 0xe7, 0x49, 0x5e, 0xd3, 0x91, 0xfd, 0x40, 0x9a, 0x51, 0x25,
+	0xfe, 0x25, 0x54, 0x55, 0xbb, 0x40, 0x71, 0x99, 0x2f, 0x4f, 0x2a, 0xf3, 0x76, 0xb0, 0x43, 0x9c,
+	0x63, 0x15, 0xf3, 0x95, 0x06, 0x57, 0xd7, 0x92, 0x99, 0x7c, 0x5e, 0x8e, 0xb2, 0x0d, 0xf3, 0xa1,
+	0x1b, 0xa1, 0x80, 0x6d, 0xa7, 0x78, 0x81, 0x7c, 0xbe, 0x16, 0xef, 0xff, 0x7f, 0x1d, 0x35, 0x56,
+	0x52, 0x6c, 0x8b, 0x84, 0x28, 0x48, 0xd4, 0xa9, 0xdd, 0x21, 0xd7, 0x7d, 0xdc, 0x41, 0x94, 0x59,
+	0xeb, 0xe2, 0x3f, 0x67, 0x4e, 0x1a, 0x5b, 0x3b, 0x95, 0x33, 0xe4, 0xa7, 0xe1, 0x0c, 0x8f, 0x61,
+	0x71, 0xf8, 0x76, 0x09, 0x70, 0xb5, 0x63, 0x26, 0x78, 0x6a, 0x87, 0x1c, 0x21, 0x2f, 0x69, 0x05,
+	0xf3, 0x67, 0x98, 0xff, 0x36, 0xf4, 0xdf, 0x02, 0xaf, 0x6b, 0x41, 0x35, 0x42, 0x94, 0xf4, 0x23,
+	0x0f, 0x51, 0x81, 0xd5, 0xb8, 0x4b, 0x1d, 0x8b, 0x99, 0x2b, 0x70, 0xf9, 0x9e, 0x24, 0xc0, 0xb1,
+	0xe7, 0x3a, 0x94, 0xe5, 0x24, 0x90, 0x57, 0xa9, 0x3a, 0xf1, 0x96, 0x27, 0x5f, 0x22, 0x9b, 0xcc,
+	0x85, 0xb2, 0xe2, 0xcf, 0xea, 0xde, 0xf5, 0x53, 0xb8, 0xa4, 0x10, 0x70, 0x62, 0x41, 0x73, 0x07,
+	0x6a, 0xdf, 0xbb, 0xf8, 0xe2, 0x67, 0x67, 0x04, 0x33, 0xd2, 0x8f, 0x8a, 0x75, 0x88, 0x87, 0x68,
+	0x93, 0x79, 0x48, 0xee, 0x4d, 0x78, 0x48, 0xeb, 0xd5, 0x0c, 0x14, 0xc5, 0xe4, 0xd4, 0xf7, 0xa0,
+	0x24, 0x39, 0xa6, 0x6e, 0x5b, 0x93, 0x7e, 0x31, 0x59, 0x23, 0x9c, 0xde, 0xf8, 0x74, 0x7a, 0x05,
+	0x75, 0xb5, 0x1f, 0xa1, 0x28, 0xb8, 0xa0, 0xbe, 0x32, 0x59, 0x35, 0xcd, 0x4c, 0x8d, 0x8f, 0xa7,
+	0x92, 0x55, 0x1e, 0x3a, 0x50, 0x92, 0x04, 0x2b, 0xeb, 0x3a, 0x23, 0x84, 0xd3, 0xf8, 0x64, 0x1a,
+	0x85, 0xc4, 0xd1, 0x33, 0x98, 0x3d, 0xc1, 0xe4, 0xf4, 0xd6, 0x34, 0xea, 0x27, 0x07, 0xfa, 0x19,
+	0x5d, 0x3e, 0x81, 0xfc, 0x26, 0x62, 0x7a, 0x73, 0xb2, 0xd2, 0x31, 0xdd, 0x33, 0x3e, 0x9a, 0x42,
+	0x32, 0xc1, 0xad, 0xc0, 0x3b, 0xad, 0x6e, 0x4d, 0x56, 0x19, 0x66, 0x67, 0x86, 0x3d, 0xb5, 0xbc,
+	0x72, 0xd4, 0x86, 0x02, 0x27, 0x5b, 0x7a, 0x46, 0x6c, 0x29, 0x42, 0x66, 0x2c, 0x8e, 0x24, 0xf7,
+	0x06, 0xff, 0xb1, 0xae, 0x6f, 0x41, 0x81, 0x97, 0x92, 0x9e, 0x91, 0x87, 0xa3, 0x44, 0x6a, 0xac,
+	0xc5, 0x87, 0x50, 0x4d, 0x38, 0x46, 0x16, 0x14, 0xc3, 0x64, 0x64, 0xac, 0xd1, 0xfb, 0x50, 0x56,
+	0xec, 0x40, 0xcf, 0x78, 0xef, 0x93, 0x24, 0x62, 0x82, 0xc1, 0xa2, 0x98, 0xf6, 0x59, 0x11, 0x0e,
+	0x53, 0x82, 0xb1, 0x06, 0x1f, 0x40, 0x49, 0x8e, 0xfd, 0xac, 0xa2, 0x19, 0x21, 0x07, 0x63, 0x4d,
+	0x62, 0xa8, 0xc4, 0x93, 0x5b, 0xbf, 0x9e, 0x9d, 0x23, 0x29, 0xa2, 0x60, 0x58, 0xd3, 0x8a, 0xab,
+	0x8c, 0x7a, 0x0e, 0x90, 0x9a, 0x97, 0x37, 0x33, 0x20, 0x3e, 0x6d, 0xf2, 0x1b, 0x9f, 0x9d, 0x4d,
+	0x49, 0x39, 0x7e, 0x00, 0x25, 0x39, 0x10, 0xb3, 0x60, 0x1b, 0x19, 0x9b, 0x63, 0x61, 0xdb, 0x81,
+	0xb2, 0x1a, 0x5d, 0x59, 0xb9, 0x72, 0x72, 0x1a, 0x1a, 0xd7, 0xa7, 0x94, 0x56, 0xa1, 0xff, 0x00,
+	0x05, 0x3e, 0x73, 0xb2, 0xaa, 0x30, 0x35, 0xff, 0x8c, 0x95, 0x69, 0x44, 0xa5, 0xf9, 0xd5, 0xef,
+	0x0e, 0x5f, 0x2f, 0x5d, 0xfa, 0xf3, 0xf5, 0xd2, 0xa5, 0x5f, 0x07, 0x4b, 0xda, 0xe1, 0x60, 0x49,
+	0xfb, 0x63, 0xb0, 0xa4, 0xfd, 0x33, 0x58, 0xd2, 0x9e, 0xdc, 0x79, 0xb3, 0xbf, 0xec, 0xdd, 0x16,
+	0x8b, 0xc7, 0xb9, 0xa7, 0x25, 0x01, 0xd8, 0xcd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x85, 0xa2,
+	0x4f, 0xd1, 0x22, 0x14, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -373,8 +1257,9 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Tasks service
-
+// TasksClient is the client API for Tasks service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type TasksClient interface {
 	// Create a task.
 	Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error)
@@ -386,15 +1271,15 @@
 	Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
 	List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error)
 	// Kill a task or process.
-	Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
-	Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
-	ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
-	CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
-	Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
-	Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*types1.Empty, error)
+	Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*types1.Empty, error)
+	ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*types1.Empty, error)
+	CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*types1.Empty, error)
+	Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error)
+	Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error)
 	ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error)
 	Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error)
-	Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error)
 	Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error)
 	Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error)
 }
@@ -409,7 +1294,7 @@
 
 func (c *tasksClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) {
 	out := new(CreateTaskResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Create", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Create", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -418,7 +1303,7 @@
 
 func (c *tasksClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) {
 	out := new(StartResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -427,7 +1312,7 @@
 
 func (c *tasksClient) Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) {
 	out := new(DeleteResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Delete", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Delete", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -436,7 +1321,7 @@
 
 func (c *tasksClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) {
 	out := new(DeleteResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/DeleteProcess", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/DeleteProcess", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -445,7 +1330,7 @@
 
 func (c *tasksClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) {
 	out := new(GetResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -454,61 +1339,61 @@
 
 func (c *tasksClient) List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) {
 	out := new(ListTasksResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/List", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/List", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
-	out := new(google_protobuf.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Kill", in, out, c.cc, opts...)
+func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Kill", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
-	out := new(google_protobuf.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, c.cc, opts...)
+func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *tasksClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
-	out := new(google_protobuf.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ResizePty", in, out, c.cc, opts...)
+func (c *tasksClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ResizePty", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *tasksClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
-	out := new(google_protobuf.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/CloseIO", in, out, c.cc, opts...)
+func (c *tasksClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/CloseIO", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *tasksClient) Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
-	out := new(google_protobuf.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Pause", in, out, c.cc, opts...)
+func (c *tasksClient) Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Pause", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *tasksClient) Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
-	out := new(google_protobuf.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Resume", in, out, c.cc, opts...)
+func (c *tasksClient) Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Resume", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -517,7 +1402,7 @@
 
 func (c *tasksClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) {
 	out := new(ListPidsResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ListPids", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ListPids", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -526,16 +1411,16 @@
 
 func (c *tasksClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) {
 	out := new(CheckpointTaskResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Checkpoint", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Checkpoint", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
-	out := new(google_protobuf.Empty)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Update", in, out, c.cc, opts...)
+func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) {
+	out := new(types1.Empty)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Update", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -544,7 +1429,7 @@
 
 func (c *tasksClient) Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) {
 	out := new(MetricsResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -553,15 +1438,14 @@
 
 func (c *tasksClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) {
 	out := new(WaitResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Wait", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Wait", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Tasks service
-
+// TasksServer is the server API for Tasks service.
 type TasksServer interface {
 	// Create a task.
 	Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error)
@@ -573,15 +1457,15 @@
 	Get(context.Context, *GetRequest) (*GetResponse, error)
 	List(context.Context, *ListTasksRequest) (*ListTasksResponse, error)
 	// Kill a task or process.
-	Kill(context.Context, *KillRequest) (*google_protobuf.Empty, error)
-	Exec(context.Context, *ExecProcessRequest) (*google_protobuf.Empty, error)
-	ResizePty(context.Context, *ResizePtyRequest) (*google_protobuf.Empty, error)
-	CloseIO(context.Context, *CloseIORequest) (*google_protobuf.Empty, error)
-	Pause(context.Context, *PauseTaskRequest) (*google_protobuf.Empty, error)
-	Resume(context.Context, *ResumeTaskRequest) (*google_protobuf.Empty, error)
+	Kill(context.Context, *KillRequest) (*types1.Empty, error)
+	Exec(context.Context, *ExecProcessRequest) (*types1.Empty, error)
+	ResizePty(context.Context, *ResizePtyRequest) (*types1.Empty, error)
+	CloseIO(context.Context, *CloseIORequest) (*types1.Empty, error)
+	Pause(context.Context, *PauseTaskRequest) (*types1.Empty, error)
+	Resume(context.Context, *ResumeTaskRequest) (*types1.Empty, error)
 	ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error)
 	Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error)
-	Update(context.Context, *UpdateTaskRequest) (*google_protobuf.Empty, error)
+	Update(context.Context, *UpdateTaskRequest) (*types1.Empty, error)
 	Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error)
 	Wait(context.Context, *WaitRequest) (*WaitResponse, error)
 }
@@ -1054,6 +1938,9 @@
 		}
 		i += n2
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1083,6 +1970,9 @@
 		i++
 		i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1113,6 +2003,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
 		i += copy(dAtA[i:], m.ExecID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1136,6 +2029,9 @@
 		i++
 		i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1160,6 +2056,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
 		i += copy(dAtA[i:], m.ContainerID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1196,12 +2095,15 @@
 	}
 	dAtA[i] = 0x22
 	i++
-	i = encodeVarintTasks(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
-	n3, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	i = encodeVarintTasks(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n3
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1232,6 +2134,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
 		i += copy(dAtA[i:], m.ExecID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1262,6 +2167,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
 		i += copy(dAtA[i:], m.ExecID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1290,6 +2198,9 @@
 		}
 		i += n4
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1314,6 +2225,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.Filter)))
 		i += copy(dAtA[i:], m.Filter)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1344,6 +2258,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1389,6 +2306,9 @@
 		}
 		i++
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1457,6 +2377,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
 		i += copy(dAtA[i:], m.ExecID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1475,6 +2398,9 @@
 	_ = i
 	var l int
 	_ = l
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1515,6 +2441,9 @@
 		i++
 		i = encodeVarintTasks(dAtA, i, uint64(m.Height))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1555,6 +2484,9 @@
 		}
 		i++
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1579,6 +2511,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
 		i += copy(dAtA[i:], m.ContainerID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1603,6 +2538,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
 		i += copy(dAtA[i:], m.ContainerID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1627,6 +2565,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
 		i += copy(dAtA[i:], m.ContainerID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1657,6 +2598,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1697,6 +2641,9 @@
 		}
 		i += n6
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1727,6 +2674,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1761,6 +2711,9 @@
 		}
 		i += n7
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1794,6 +2747,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1824,6 +2780,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1854,6 +2813,9 @@
 		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
 		i += copy(dAtA[i:], m.ExecID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1879,12 +2841,15 @@
 	}
 	dAtA[i] = 0x12
 	i++
-	i = encodeVarintTasks(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
-	n8, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	i = encodeVarintTasks(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n8
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1898,6 +2863,9 @@
 	return offset + 1
 }
 func (m *CreateTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -1933,10 +2901,16 @@
 		l = m.Options.Size()
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateTaskResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -1946,10 +2920,16 @@
 	if m.Pid != 0 {
 		n += 1 + sovTasks(uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StartRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -1960,29 +2940,47 @@
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StartResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Pid != 0 {
 		n += 1 + sovTasks(uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -1995,12 +2993,18 @@
 	if m.ExitStatus != 0 {
 		n += 1 + sovTasks(uint64(m.ExitStatus))
 	}
-	l = types.SizeOfStdTime(m.ExitedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovTasks(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteProcessRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2011,10 +3015,16 @@
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *GetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2025,30 +3035,48 @@
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *GetResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Process != nil {
 		l = m.Process.Size()
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListTasksRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Filter)
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListTasksResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Tasks) > 0 {
@@ -2057,10 +3085,16 @@
 			n += 1 + l + sovTasks(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *KillRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2077,10 +3111,16 @@
 	if m.All {
 		n += 2
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ExecProcessRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2110,16 +3150,28 @@
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ExecProcessResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ResizePtyRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2136,10 +3188,16 @@
 	if m.Height != 0 {
 		n += 1 + sovTasks(uint64(m.Height))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CloseIORequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2153,40 +3211,64 @@
 	if m.Stdin {
 		n += 2
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *PauseTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ResumeTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListPidsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListPidsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Processes) > 0 {
@@ -2195,10 +3277,16 @@
 			n += 1 + l + sovTasks(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CheckpointTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2213,10 +3301,16 @@
 		l = m.Options.Size()
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CheckpointTaskResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Descriptors) > 0 {
@@ -2225,10 +3319,16 @@
 			n += 1 + l + sovTasks(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2239,10 +3339,16 @@
 		l = m.Resources.Size()
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *MetricsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Filters) > 0 {
@@ -2251,10 +3357,16 @@
 			n += 1 + l + sovTasks(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *MetricsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Metrics) > 0 {
@@ -2263,10 +3375,16 @@
 			n += 1 + l + sovTasks(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *WaitRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -2277,17 +3395,26 @@
 	if l > 0 {
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *WaitResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.ExitStatus != 0 {
 		n += 1 + sovTasks(uint64(m.ExitStatus))
 	}
-	l = types.SizeOfStdTime(m.ExitedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovTasks(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -2310,13 +3437,14 @@
 	}
 	s := strings.Join([]string{`&CreateTaskRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
-		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "types.Mount", 1) + `,`,
 		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
 		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
 		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
 		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
-		`Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "containerd_types2.Descriptor", 1) + `,`,
-		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "types.Descriptor", 1) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2328,6 +3456,7 @@
 	s := strings.Join([]string{`&CreateTaskResponse{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2339,6 +3468,7 @@
 	s := strings.Join([]string{`&StartRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2349,6 +3479,7 @@
 	}
 	s := strings.Join([]string{`&StartResponse{`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2359,6 +3490,7 @@
 	}
 	s := strings.Join([]string{`&DeleteTaskRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2371,7 +3503,8 @@
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2383,6 +3516,7 @@
 	s := strings.Join([]string{`&DeleteProcessRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2394,6 +3528,7 @@
 	s := strings.Join([]string{`&GetRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2403,7 +3538,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&GetResponse{`,
-		`Process:` + strings.Replace(fmt.Sprintf("%v", this.Process), "Process", "containerd_v1_types.Process", 1) + `,`,
+		`Process:` + strings.Replace(fmt.Sprintf("%v", this.Process), "Process", "task.Process", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2414,6 +3550,7 @@
 	}
 	s := strings.Join([]string{`&ListTasksRequest{`,
 		`Filter:` + fmt.Sprintf("%v", this.Filter) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2423,7 +3560,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&ListTasksResponse{`,
-		`Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Process", "containerd_v1_types.Process", 1) + `,`,
+		`Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Process", "task.Process", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2437,6 +3575,7 @@
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
 		`Signal:` + fmt.Sprintf("%v", this.Signal) + `,`,
 		`All:` + fmt.Sprintf("%v", this.All) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2451,8 +3590,9 @@
 		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
 		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
 		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
-		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types1.Any", 1) + `,`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2462,6 +3602,7 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&ExecProcessResponse{`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2475,6 +3616,7 @@
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
 		`Width:` + fmt.Sprintf("%v", this.Width) + `,`,
 		`Height:` + fmt.Sprintf("%v", this.Height) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2487,6 +3629,7 @@
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
 		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2497,6 +3640,7 @@
 	}
 	s := strings.Join([]string{`&PauseTaskRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2507,6 +3651,7 @@
 	}
 	s := strings.Join([]string{`&ResumeTaskRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2517,6 +3662,7 @@
 	}
 	s := strings.Join([]string{`&ListPidsRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2526,7 +3672,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&ListPidsResponse{`,
-		`Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "containerd_v1_types.ProcessInfo", 1) + `,`,
+		`Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "task.ProcessInfo", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2538,7 +3685,8 @@
 	s := strings.Join([]string{`&CheckpointTaskRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`,
-		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2548,7 +3696,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&CheckpointTaskResponse{`,
-		`Descriptors:` + strings.Replace(fmt.Sprintf("%v", this.Descriptors), "Descriptor", "containerd_types2.Descriptor", 1) + `,`,
+		`Descriptors:` + strings.Replace(fmt.Sprintf("%v", this.Descriptors), "Descriptor", "types.Descriptor", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2559,7 +3708,8 @@
 	}
 	s := strings.Join([]string{`&UpdateTaskRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
-		`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "types1.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2570,6 +3720,7 @@
 	}
 	s := strings.Join([]string{`&MetricsRequest{`,
 		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2579,7 +3730,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&MetricsResponse{`,
-		`Metrics:` + strings.Replace(fmt.Sprintf("%v", this.Metrics), "Metric", "containerd_types1.Metric", 1) + `,`,
+		`Metrics:` + strings.Replace(fmt.Sprintf("%v", this.Metrics), "Metric", "types.Metric", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2591,6 +3743,7 @@
 	s := strings.Join([]string{`&WaitRequest{`,
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2601,7 +3754,8 @@
 	}
 	s := strings.Join([]string{`&WaitResponse{`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2629,7 +3783,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2657,7 +3811,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2667,6 +3821,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2686,7 +3843,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2695,10 +3852,13 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Rootfs = append(m.Rootfs, &containerd_types.Mount{})
+			m.Rootfs = append(m.Rootfs, &types.Mount{})
 			if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -2717,7 +3877,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2727,6 +3887,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2746,7 +3909,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2756,6 +3919,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2775,7 +3941,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2785,6 +3951,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2804,7 +3973,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2824,7 +3993,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2833,11 +4002,14 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Checkpoint == nil {
-				m.Checkpoint = &containerd_types2.Descriptor{}
+				m.Checkpoint = &types.Descriptor{}
 			}
 			if err := m.Checkpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -2857,7 +4029,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2866,11 +4038,14 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Options == nil {
-				m.Options = &google_protobuf1.Any{}
+				m.Options = &types1.Any{}
 			}
 			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -2885,9 +4060,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2912,7 +4091,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2940,7 +4119,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2950,6 +4129,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2969,7 +4151,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2983,9 +4165,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3010,7 +4196,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3038,7 +4224,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3048,6 +4234,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3067,7 +4256,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3077,6 +4266,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3091,9 +4283,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3118,7 +4314,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3146,7 +4342,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3160,9 +4356,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3187,7 +4387,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3215,7 +4415,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3225,6 +4425,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3239,9 +4442,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3266,7 +4473,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3294,7 +4501,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3304,6 +4511,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3323,7 +4533,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3342,7 +4552,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				m.ExitStatus |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3361,7 +4571,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3370,10 +4580,13 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -3386,9 +4599,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3413,7 +4630,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3441,7 +4658,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3451,6 +4668,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3470,7 +4690,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3480,6 +4700,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3494,9 +4717,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3521,7 +4748,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3549,7 +4776,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3559,6 +4786,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3578,7 +4808,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3588,6 +4818,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3602,9 +4835,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3629,7 +4866,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3657,7 +4894,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3666,11 +4903,14 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Process == nil {
-				m.Process = &containerd_v1_types.Process{}
+				m.Process = &task.Process{}
 			}
 			if err := m.Process.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -3685,9 +4925,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3712,7 +4956,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3740,7 +4984,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3750,6 +4994,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3764,9 +5011,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3791,7 +5042,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3819,7 +5070,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3828,10 +5079,13 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Tasks = append(m.Tasks, &containerd_v1_types.Process{})
+			m.Tasks = append(m.Tasks, &task.Process{})
 			if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -3845,9 +5099,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3872,7 +5130,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3900,7 +5158,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3910,6 +5168,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3929,7 +5190,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3939,6 +5200,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3958,7 +5222,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Signal |= (uint32(b) & 0x7F) << shift
+				m.Signal |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3977,7 +5241,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3992,9 +5256,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4019,7 +5287,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4047,7 +5315,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4057,6 +5325,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4076,7 +5347,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4086,6 +5357,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4105,7 +5379,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4115,6 +5389,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4134,7 +5411,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4144,6 +5421,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4163,7 +5443,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4183,7 +5463,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4192,11 +5472,14 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Spec == nil {
-				m.Spec = &google_protobuf1.Any{}
+				m.Spec = &types1.Any{}
 			}
 			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -4216,7 +5499,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4226,6 +5509,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4240,9 +5526,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4267,7 +5557,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4290,9 +5580,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4317,7 +5611,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4345,7 +5639,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4355,6 +5649,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4374,7 +5671,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4384,6 +5681,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4403,7 +5703,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Width |= (uint32(b) & 0x7F) << shift
+				m.Width |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4422,7 +5722,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Height |= (uint32(b) & 0x7F) << shift
+				m.Height |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4436,9 +5736,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4463,7 +5767,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4491,7 +5795,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4501,6 +5805,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4520,7 +5827,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4530,6 +5837,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4549,7 +5859,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4564,9 +5874,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4591,7 +5905,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4619,7 +5933,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4629,6 +5943,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4643,9 +5960,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4670,7 +5991,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4698,7 +6019,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4708,6 +6029,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4722,9 +6046,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4749,7 +6077,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4777,7 +6105,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4787,6 +6115,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4801,9 +6132,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4828,7 +6163,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4856,7 +6191,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4865,10 +6200,13 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Processes = append(m.Processes, &containerd_v1_types.ProcessInfo{})
+			m.Processes = append(m.Processes, &task.ProcessInfo{})
 			if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -4882,9 +6220,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4909,7 +6251,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4937,7 +6279,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4947,6 +6289,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4966,7 +6311,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4976,6 +6321,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4995,7 +6343,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5004,11 +6352,14 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Options == nil {
-				m.Options = &google_protobuf1.Any{}
+				m.Options = &types1.Any{}
 			}
 			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -5023,9 +6374,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -5050,7 +6405,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5078,7 +6433,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5087,10 +6442,13 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Descriptors = append(m.Descriptors, &containerd_types2.Descriptor{})
+			m.Descriptors = append(m.Descriptors, &types.Descriptor{})
 			if err := m.Descriptors[len(m.Descriptors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -5104,9 +6462,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -5131,7 +6493,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5159,7 +6521,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5169,6 +6531,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5188,7 +6553,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5197,11 +6562,14 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Resources == nil {
-				m.Resources = &google_protobuf1.Any{}
+				m.Resources = &types1.Any{}
 			}
 			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -5216,9 +6584,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -5243,7 +6615,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5271,7 +6643,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5281,6 +6653,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5295,9 +6670,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -5322,7 +6701,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5350,7 +6729,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5359,10 +6738,13 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Metrics = append(m.Metrics, &containerd_types1.Metric{})
+			m.Metrics = append(m.Metrics, &types.Metric{})
 			if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -5376,9 +6758,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -5403,7 +6789,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5431,7 +6817,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5441,6 +6827,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5460,7 +6849,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5470,6 +6859,9 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5484,9 +6876,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -5511,7 +6907,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5539,7 +6935,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				m.ExitStatus |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5558,7 +6954,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5567,10 +6963,13 @@
 				return ErrInvalidLengthTasks
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -5583,9 +6982,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTasks
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTasks
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -5649,10 +7052,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthTasks
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthTasks
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -5681,6 +7087,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthTasks
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -5699,94 +7108,3 @@
 	ErrInvalidLengthTasks = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowTasks   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", fileDescriptorTasks)
-}
-
-var fileDescriptorTasks = []byte{
-	// 1318 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x45,
-	0x1c, 0xef, 0xfa, 0xed, 0xbf, 0x93, 0x36, 0x59, 0xd2, 0x60, 0x96, 0x2a, 0x0e, 0xcb, 0xc5, 0x04,
-	0xba, 0x4b, 0x5d, 0x54, 0x21, 0x5a, 0x21, 0x35, 0x0f, 0x22, 0x0b, 0xaa, 0xa6, 0xdb, 0x02, 0x55,
-	0x25, 0x14, 0xb6, 0xbb, 0x13, 0x67, 0x14, 0x7b, 0x67, 0xbb, 0x33, 0x4e, 0x1b, 0x38, 0xc0, 0x47,
-	0xe8, 0x95, 0x0b, 0x9f, 0x27, 0x47, 0x8e, 0x08, 0x55, 0x81, 0xfa, 0x5b, 0x70, 0x43, 0xf3, 0xd8,
-	0xcd, 0xc6, 0x8e, 0xbd, 0x4e, 0xd3, 0x70, 0x69, 0x67, 0x66, 0xff, 0xaf, 0xf9, 0xcd, 0xff, 0xf1,
-	0x73, 0x60, 0xb5, 0x83, 0xd9, 0x6e, 0xff, 0xa9, 0xe5, 0x91, 0x9e, 0xed, 0x91, 0x80, 0xb9, 0x38,
-	0x40, 0x91, 0x9f, 0x5e, 0xba, 0x21, 0xb6, 0x29, 0x8a, 0xf6, 0xb1, 0x87, 0xa8, 0xcd, 0x5c, 0xba,
-	0x47, 0xed, 0xfd, 0x1b, 0x72, 0x61, 0x85, 0x11, 0x61, 0x44, 0xbf, 0x76, 0x2c, 0x6d, 0xc5, 0x92,
-	0x96, 0x14, 0xd8, 0xbf, 0x61, 0xbc, 0xdf, 0x21, 0xa4, 0xd3, 0x45, 0xb6, 0x90, 0x7d, 0xda, 0xdf,
-	0xb1, 0x51, 0x2f, 0x64, 0x07, 0x52, 0xd5, 0x78, 0x6f, 0xf8, 0xa3, 0x1b, 0xc4, 0x9f, 0x16, 0x3a,
-	0xa4, 0x43, 0xc4, 0xd2, 0xe6, 0x2b, 0x75, 0x7a, 0x6b, 0xaa, 0x78, 0xd9, 0x41, 0x88, 0xa8, 0xdd,
-	0x23, 0xfd, 0x80, 0x29, 0xbd, 0xcf, 0xcf, 0xa2, 0x87, 0x58, 0x84, 0x3d, 0x75, 0x3b, 0xe3, 0xf6,
-	0x19, 0x34, 0x7d, 0x44, 0xbd, 0x08, 0x87, 0x8c, 0x44, 0x4a, 0xf9, 0x8b, 0x33, 0x28, 0x73, 0xc4,
-	0xc4, 0x3f, 0x4a, 0xb7, 0x31, 0x8c, 0x0d, 0xc3, 0x3d, 0x44, 0x99, 0xdb, 0x0b, 0xa5, 0x80, 0x79,
-	0x98, 0x83, 0xf9, 0xb5, 0x08, 0xb9, 0x0c, 0x3d, 0x72, 0xe9, 0x9e, 0x83, 0x9e, 0xf5, 0x11, 0x65,
-	0x7a, 0x0b, 0x66, 0x12, 0xf3, 0xdb, 0xd8, 0xaf, 0x6b, 0xcb, 0x5a, 0xb3, 0xba, 0x7a, 0x65, 0x70,
-	0xd4, 0xa8, 0xad, 0xc5, 0xe7, 0xed, 0x75, 0xa7, 0x96, 0x08, 0xb5, 0x7d, 0xdd, 0x86, 0x52, 0x44,
-	0x08, 0xdb, 0xa1, 0xf5, 0xfc, 0x72, 0xbe, 0x59, 0x6b, 0xbd, 0x6b, 0xa5, 0x9e, 0x54, 0x44, 0x67,
-	0xdd, 0xe3, 0x60, 0x3a, 0x4a, 0x4c, 0x5f, 0x80, 0x22, 0x65, 0x3e, 0x0e, 0xea, 0x05, 0x6e, 0xdd,
-	0x91, 0x1b, 0x7d, 0x11, 0x4a, 0x94, 0xf9, 0xa4, 0xcf, 0xea, 0x45, 0x71, 0xac, 0x76, 0xea, 0x1c,
-	0x45, 0x51, 0xbd, 0x94, 0x9c, 0xa3, 0x28, 0xd2, 0x0d, 0xa8, 0x30, 0x14, 0xf5, 0x70, 0xe0, 0x76,
-	0xeb, 0xe5, 0x65, 0xad, 0x59, 0x71, 0x92, 0xbd, 0x7e, 0x07, 0xc0, 0xdb, 0x45, 0xde, 0x5e, 0x48,
-	0x70, 0xc0, 0xea, 0x95, 0x65, 0xad, 0x59, 0x6b, 0x5d, 0x1b, 0x0d, 0x6b, 0x3d, 0x41, 0xdc, 0x49,
-	0xc9, 0xeb, 0x16, 0x94, 0x49, 0xc8, 0x30, 0x09, 0x68, 0xbd, 0x2a, 0x54, 0x17, 0x2c, 0x89, 0xa6,
-	0x15, 0xa3, 0x69, 0xdd, 0x0d, 0x0e, 0x9c, 0x58, 0xc8, 0x7c, 0x02, 0x7a, 0x1a, 0x49, 0x1a, 0x92,
-	0x80, 0xa2, 0x37, 0x82, 0x72, 0x0e, 0xf2, 0x21, 0xf6, 0xeb, 0xb9, 0x65, 0xad, 0x39, 0xeb, 0xf0,
-	0xa5, 0xd9, 0x81, 0x99, 0x87, 0xcc, 0x8d, 0xd8, 0x79, 0x1e, 0xe8, 0x43, 0x28, 0xa3, 0x17, 0xc8,
-	0xdb, 0x56, 0x96, 0xab, 0xab, 0x30, 0x38, 0x6a, 0x94, 0x36, 0x5e, 0x20, 0xaf, 0xbd, 0xee, 0x94,
-	0xf8, 0xa7, 0xb6, 0x6f, 0x7e, 0x00, 0xb3, 0xca, 0x91, 0x8a, 0x5f, 0xc5, 0xa2, 0x1d, 0xc7, 0xb2,
-	0x09, 0xf3, 0xeb, 0xa8, 0x8b, 0xce, 0x9d, 0x31, 0xe6, 0xef, 0x1a, 0x5c, 0x96, 0x96, 0x12, 0x6f,
-	0x8b, 0x90, 0x4b, 0x94, 0x4b, 0x83, 0xa3, 0x46, 0xae, 0xbd, 0xee, 0xe4, 0xf0, 0x29, 0x88, 0xe8,
-	0x0d, 0xa8, 0xa1, 0x17, 0x98, 0x6d, 0x53, 0xe6, 0xb2, 0x3e, 0xcf, 0x39, 0xfe, 0x05, 0xf8, 0xd1,
-	0x43, 0x71, 0xa2, 0xdf, 0x85, 0x2a, 0xdf, 0x21, 0x7f, 0xdb, 0x65, 0x22, 0xc5, 0x6a, 0x2d, 0x63,
-	0xe4, 0x01, 0x1f, 0xc5, 0xe5, 0xb0, 0x5a, 0x39, 0x3c, 0x6a, 0x5c, 0x7a, 0xf9, 0x77, 0x43, 0x73,
-	0x2a, 0x52, 0xed, 0x2e, 0x33, 0x09, 0x2c, 0xc8, 0xf8, 0xb6, 0x22, 0xe2, 0x21, 0x4a, 0x2f, 0x1c,
-	0x7d, 0x04, 0xb0, 0x89, 0x2e, 0xfe, 0x91, 0x37, 0xa0, 0x26, 0xdc, 0x28, 0xd0, 0x6f, 0x41, 0x39,
-	0x94, 0x17, 0x14, 0x2e, 0x86, 0x6a, 0x64, 0xff, 0x86, 0x2a, 0x93, 0x18, 0x84, 0x58, 0xd8, 0x5c,
-	0x81, 0xb9, 0x6f, 0x30, 0x65, 0x3c, 0x0d, 0x12, 0x68, 0x16, 0xa1, 0xb4, 0x83, 0xbb, 0x0c, 0x45,
-	0x32, 0x5a, 0x47, 0xed, 0x78, 0xd2, 0xa4, 0x64, 0x93, 0xda, 0x28, 0x8a, 0x16, 0x5f, 0xd7, 0x44,
-	0xc7, 0x98, 0xec, 0x56, 0x8a, 0x9a, 0x2f, 0x35, 0xa8, 0x7d, 0x8d, 0xbb, 0xdd, 0x8b, 0x06, 0x49,
-	0x34, 0x1c, 0xdc, 0xe1, 0x6d, 0x45, 0xe6, 0x96, 0xda, 0xf1, 0x54, 0x74, 0xbb, 0x5d, 0x91, 0x51,
-	0x15, 0x87, 0x2f, 0xcd, 0x7f, 0x35, 0xd0, 0xb9, 0xf2, 0x5b, 0xc8, 0x92, 0xa4, 0x27, 0xe6, 0x4e,
-	0xef, 0x89, 0xf9, 0x31, 0x3d, 0xb1, 0x30, 0xb6, 0x27, 0x16, 0x87, 0x7a, 0x62, 0x13, 0x0a, 0x34,
-	0x44, 0x9e, 0xe8, 0xa2, 0xe3, 0x5a, 0x9a, 0x90, 0x48, 0xa3, 0x54, 0x1e, 0x9b, 0x4a, 0x57, 0xe1,
-	0x9d, 0x13, 0x57, 0x97, 0x2f, 0x6b, 0xfe, 0xa6, 0xc1, 0x9c, 0x83, 0x28, 0xfe, 0x09, 0x6d, 0xb1,
-	0x83, 0x0b, 0x7f, 0xaa, 0x05, 0x28, 0x3e, 0xc7, 0x3e, 0xdb, 0x55, 0x2f, 0x25, 0x37, 0x1c, 0x9d,
-	0x5d, 0x84, 0x3b, 0xbb, 0xb2, 0xfa, 0x67, 0x1d, 0xb5, 0x33, 0x7f, 0x81, 0xcb, 0x6b, 0x5d, 0x42,
-	0x51, 0xfb, 0xfe, 0xff, 0x11, 0x98, 0x7c, 0xce, 0xbc, 0x78, 0x05, 0xb9, 0x31, 0xbf, 0x82, 0xb9,
-	0x2d, 0xb7, 0x4f, 0xcf, 0xdd, 0x3f, 0x37, 0x61, 0xde, 0x41, 0xb4, 0xdf, 0x3b, 0xb7, 0xa1, 0x0d,
-	0xb8, 0xc2, 0x8b, 0x73, 0x0b, 0xfb, 0xe7, 0x49, 0x5e, 0xd3, 0x91, 0xfd, 0x40, 0x9a, 0x51, 0x25,
-	0xfe, 0x25, 0x54, 0x55, 0xbb, 0x40, 0x71, 0x99, 0x2f, 0x4f, 0x2a, 0xf3, 0x76, 0xb0, 0x43, 0x9c,
-	0x63, 0x15, 0xf3, 0x95, 0x06, 0x57, 0xd7, 0x92, 0x99, 0x7c, 0x5e, 0x8e, 0xb2, 0x0d, 0xf3, 0xa1,
-	0x1b, 0xa1, 0x80, 0x6d, 0xa7, 0x78, 0x81, 0x7c, 0xbe, 0x16, 0xef, 0xff, 0x7f, 0x1d, 0x35, 0x56,
-	0x52, 0x6c, 0x8b, 0x84, 0x28, 0x48, 0xd4, 0xa9, 0xdd, 0x21, 0xd7, 0x7d, 0xdc, 0x41, 0x94, 0x59,
-	0xeb, 0xe2, 0x3f, 0x67, 0x4e, 0x1a, 0x5b, 0x3b, 0x95, 0x33, 0xe4, 0xa7, 0xe1, 0x0c, 0x8f, 0x61,
-	0x71, 0xf8, 0x76, 0x09, 0x70, 0xb5, 0x63, 0x26, 0x78, 0x6a, 0x87, 0x1c, 0x21, 0x2f, 0x69, 0x05,
-	0xf3, 0x67, 0x98, 0xff, 0x36, 0xf4, 0xdf, 0x02, 0xaf, 0x6b, 0x41, 0x35, 0x42, 0x94, 0xf4, 0x23,
-	0x0f, 0x51, 0x81, 0xd5, 0xb8, 0x4b, 0x1d, 0x8b, 0x99, 0x2b, 0x70, 0xf9, 0x9e, 0x24, 0xc0, 0xb1,
-	0xe7, 0x3a, 0x94, 0xe5, 0x24, 0x90, 0x57, 0xa9, 0x3a, 0xf1, 0x96, 0x27, 0x5f, 0x22, 0x9b, 0xcc,
-	0x85, 0xb2, 0xe2, 0xcf, 0xea, 0xde, 0xf5, 0x53, 0xb8, 0xa4, 0x10, 0x70, 0x62, 0x41, 0x73, 0x07,
-	0x6a, 0xdf, 0xbb, 0xf8, 0xe2, 0x67, 0x67, 0x04, 0x33, 0xd2, 0x8f, 0x8a, 0x75, 0x88, 0x87, 0x68,
-	0x93, 0x79, 0x48, 0xee, 0x4d, 0x78, 0x48, 0xeb, 0xd5, 0x0c, 0x14, 0xc5, 0xe4, 0xd4, 0xf7, 0xa0,
-	0x24, 0x39, 0xa6, 0x6e, 0x5b, 0x93, 0x7e, 0x31, 0x59, 0x23, 0x9c, 0xde, 0xf8, 0x74, 0x7a, 0x05,
-	0x75, 0xb5, 0x1f, 0xa1, 0x28, 0xb8, 0xa0, 0xbe, 0x32, 0x59, 0x35, 0xcd, 0x4c, 0x8d, 0x8f, 0xa7,
-	0x92, 0x55, 0x1e, 0x3a, 0x50, 0x92, 0x04, 0x2b, 0xeb, 0x3a, 0x23, 0x84, 0xd3, 0xf8, 0x64, 0x1a,
-	0x85, 0xc4, 0xd1, 0x33, 0x98, 0x3d, 0xc1, 0xe4, 0xf4, 0xd6, 0x34, 0xea, 0x27, 0x07, 0xfa, 0x19,
-	0x5d, 0x3e, 0x81, 0xfc, 0x26, 0x62, 0x7a, 0x73, 0xb2, 0xd2, 0x31, 0xdd, 0x33, 0x3e, 0x9a, 0x42,
-	0x32, 0xc1, 0xad, 0xc0, 0x3b, 0xad, 0x6e, 0x4d, 0x56, 0x19, 0x66, 0x67, 0x86, 0x3d, 0xb5, 0xbc,
-	0x72, 0xd4, 0x86, 0x02, 0x27, 0x5b, 0x7a, 0x46, 0x6c, 0x29, 0x42, 0x66, 0x2c, 0x8e, 0x24, 0xf7,
-	0x06, 0xff, 0xb1, 0xae, 0x6f, 0x41, 0x81, 0x97, 0x92, 0x9e, 0x91, 0x87, 0xa3, 0x44, 0x6a, 0xac,
-	0xc5, 0x87, 0x50, 0x4d, 0x38, 0x46, 0x16, 0x14, 0xc3, 0x64, 0x64, 0xac, 0xd1, 0xfb, 0x50, 0x56,
-	0xec, 0x40, 0xcf, 0x78, 0xef, 0x93, 0x24, 0x62, 0x82, 0xc1, 0xa2, 0x98, 0xf6, 0x59, 0x11, 0x0e,
-	0x53, 0x82, 0xb1, 0x06, 0x1f, 0x40, 0x49, 0x8e, 0xfd, 0xac, 0xa2, 0x19, 0x21, 0x07, 0x63, 0x4d,
-	0x62, 0xa8, 0xc4, 0x93, 0x5b, 0xbf, 0x9e, 0x9d, 0x23, 0x29, 0xa2, 0x60, 0x58, 0xd3, 0x8a, 0xab,
-	0x8c, 0x7a, 0x0e, 0x90, 0x9a, 0x97, 0x37, 0x33, 0x20, 0x3e, 0x6d, 0xf2, 0x1b, 0x9f, 0x9d, 0x4d,
-	0x49, 0x39, 0x7e, 0x00, 0x25, 0x39, 0x10, 0xb3, 0x60, 0x1b, 0x19, 0x9b, 0x63, 0x61, 0xdb, 0x81,
-	0xb2, 0x1a, 0x5d, 0x59, 0xb9, 0x72, 0x72, 0x1a, 0x1a, 0xd7, 0xa7, 0x94, 0x56, 0xa1, 0xff, 0x00,
-	0x05, 0x3e, 0x73, 0xb2, 0xaa, 0x30, 0x35, 0xff, 0x8c, 0x95, 0x69, 0x44, 0xa5, 0xf9, 0xd5, 0xef,
-	0x0e, 0x5f, 0x2f, 0x5d, 0xfa, 0xf3, 0xf5, 0xd2, 0xa5, 0x5f, 0x07, 0x4b, 0xda, 0xe1, 0x60, 0x49,
-	0xfb, 0x63, 0xb0, 0xa4, 0xfd, 0x33, 0x58, 0xd2, 0x9e, 0xdc, 0x79, 0xb3, 0xbf, 0xec, 0xdd, 0x16,
-	0x8b, 0xc7, 0xb9, 0xa7, 0x25, 0x01, 0xd8, 0xcd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x85, 0xa2,
-	0x4f, 0xd1, 0x22, 0x14, 0x00, 0x00,
-}
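
The regenerated tasks service code above repeats two mechanical patterns: the varint masking is now written as uint64(b&0x7F) << shift, and every computed offset (postIndex, iNdEx + skippy) is additionally rejected when it wraps negative. A minimal, self-contained sketch of that decoding loop is shown below for orientation only; the function name and error values are illustrative and not part of the vendored API.

package main

import (
	"errors"
	"fmt"
	"io"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
)

// decodeVarint is an illustrative stand-in for the inlined loops in the
// generated Unmarshal methods: it masks each byte before widening and
// guards the resulting index against overflowing into a negative value.
func decodeVarint(dAtA []byte, iNdEx int) (v uint64, next int, err error) {
	l := len(dAtA)
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if iNdEx >= l {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if iNdEx < 0 {
		return 0, 0, errInvalidLength
	}
	return v, iNdEx, nil
}

func main() {
	v, next, err := decodeVarint([]byte{0xAC, 0x02}, 0) // 300 encoded as a varint
	fmt.Println(v, next, err)                           // 300 2 <nil>
}
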
diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
index 829987c..cc39288 100644
--- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
@@ -1,31 +1,19 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/version/v1/version.proto
 
-/*
-	Package version is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/services/version/v1/version.proto
-
-	It has these top-level messages:
-		VersionResponse
-*/
 package version
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/gogo/protobuf/types"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import context "golang.org/x/net/context"
-import grpc "google.golang.org/grpc"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	types "github.com/gogo/protobuf/types"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -39,18 +27,73 @@
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type VersionResponse struct {
-	Version  string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
-	Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
+	Version              string   `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+	Revision             string   `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *VersionResponse) Reset()                    { *m = VersionResponse{} }
-func (*VersionResponse) ProtoMessage()               {}
-func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
+func (m *VersionResponse) Reset()      { *m = VersionResponse{} }
+func (*VersionResponse) ProtoMessage() {}
+func (*VersionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_128109001e578ffe, []int{0}
+}
+func (m *VersionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *VersionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VersionResponse.Merge(m, src)
+}
+func (m *VersionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *VersionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_VersionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VersionResponse proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse")
 }
 
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptor_128109001e578ffe)
+}
+
+var fileDescriptor_128109001e578ffe = []byte{
+	// 243 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
+	0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
+	0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
+	0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
+	0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
+	0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
+	0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
+	0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
+	0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
+	0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
+	0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
+	0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30,
+	0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9,
+	0x01, 0x00, 0x00,
+}
+
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
 var _ grpc.ClientConn
@@ -59,10 +102,11 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Version service
-
+// VersionClient is the client API for Version service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type VersionClient interface {
-	Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
+	Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
 }
 
 type versionClient struct {
@@ -73,19 +117,18 @@
 	return &versionClient{cc}
 }
 
-func (c *versionClient) Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
+func (c *versionClient) Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
 	out := new(VersionResponse)
-	err := grpc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Version service
-
+// VersionServer is the server API for Version service.
 type VersionServer interface {
-	Version(context.Context, *google_protobuf.Empty) (*VersionResponse, error)
+	Version(context.Context, *types.Empty) (*VersionResponse, error)
 }
 
 func RegisterVersionServer(s *grpc.Server, srv VersionServer) {
@@ -93,7 +136,7 @@
 }
 
 func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(google_protobuf.Empty)
+	in := new(types.Empty)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
@@ -105,7 +148,7 @@
 		FullMethod: "/containerd.services.version.v1.Version/Version",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(VersionServer).Version(ctx, req.(*google_protobuf.Empty))
+		return srv.(VersionServer).Version(ctx, req.(*types.Empty))
 	}
 	return interceptor(ctx, in, info, handler)
 }
@@ -150,6 +193,9 @@
 		i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision)))
 		i += copy(dAtA[i:], m.Revision)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -163,6 +209,9 @@
 	return offset + 1
 }
 func (m *VersionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Version)
@@ -173,6 +222,9 @@
 	if l > 0 {
 		n += 1 + l + sovVersion(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -196,6 +248,7 @@
 	s := strings.Join([]string{`&VersionResponse{`,
 		`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
 		`Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -223,7 +276,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -251,7 +304,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -261,6 +314,9 @@
 				return ErrInvalidLengthVersion
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthVersion
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -280,7 +336,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -290,6 +346,9 @@
 				return ErrInvalidLengthVersion
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthVersion
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -304,9 +363,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthVersion
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthVersion
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -370,10 +433,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthVersion
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthVersion
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -402,6 +468,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthVersion
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -420,27 +489,3 @@
 	ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowVersion   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptorVersion)
-}
-
-var fileDescriptorVersion = []byte{
-	// 243 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
-	0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
-	0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
-	0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
-	0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
-	0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
-	0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
-	0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
-	0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
-	0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
-	0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
-	0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30,
-	0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9,
-	0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
index 93e88c0..c0179d2 100644
--- a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
@@ -1,35 +1,18 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/types/descriptor.proto
 
-/*
-	Package types is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/types/descriptor.proto
-		github.com/containerd/containerd/api/types/metrics.proto
-		github.com/containerd/containerd/api/types/mount.proto
-		github.com/containerd/containerd/api/types/platform.proto
-
-	It has these top-level messages:
-		Descriptor
-		Metric
-		Mount
-		Platform
-*/
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -48,18 +31,80 @@
 // oci descriptor found in a manifest.
 // See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
 type Descriptor struct {
-	MediaType string                                     `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
-	Digest    github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
-	Size_     int64                                      `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+	MediaType            string                                     `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+	Digest               github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	Size_                int64                                      `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+	Annotations          map[string]string                          `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
+	XXX_unrecognized     []byte                                     `json:"-"`
+	XXX_sizecache        int32                                      `json:"-"`
 }
 
-func (m *Descriptor) Reset()                    { *m = Descriptor{} }
-func (*Descriptor) ProtoMessage()               {}
-func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
+func (m *Descriptor) Reset()      { *m = Descriptor{} }
+func (*Descriptor) ProtoMessage() {}
+func (*Descriptor) Descriptor() ([]byte, []int) {
+	return fileDescriptor_37f958df3707db9e, []int{0}
+}
+func (m *Descriptor) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Descriptor) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Descriptor.Merge(m, src)
+}
+func (m *Descriptor) XXX_Size() int {
+	return m.Size()
+}
+func (m *Descriptor) XXX_DiscardUnknown() {
+	xxx_messageInfo_Descriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Descriptor proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor")
+	proto.RegisterMapType((map[string]string)(nil), "containerd.types.Descriptor.AnnotationsEntry")
 }
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptor_37f958df3707db9e)
+}
+
+var fileDescriptor_37f958df3707db9e = []byte{
+	// 311 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
+	0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20,
+	0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88,
+	0x3a, 0xa5, 0x39, 0x4c, 0x5c, 0x5c, 0x2e, 0x70, 0xcd, 0x42, 0xb2, 0x5c, 0x5c, 0xb9, 0xa9, 0x29,
+	0x99, 0x89, 0xf1, 0x20, 0x3d, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x60, 0x91, 0x90,
+	0xca, 0x82, 0x54, 0x21, 0x2f, 0x2e, 0xb6, 0x94, 0xcc, 0xf4, 0xd4, 0xe2, 0x12, 0x09, 0x26, 0x90,
+	0x94, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee, 0xc9, 0x6b, 0x21, 0x39, 0x35, 0xbf, 0x20,
+	0x35, 0x0f, 0x6e, 0x79, 0xb1, 0x7e, 0x7a, 0xbe, 0x2e, 0x44, 0x8b, 0x9e, 0x0b, 0x98, 0x0a, 0x82,
+	0x9a, 0x20, 0x24, 0xc4, 0xc5, 0x52, 0x9c, 0x59, 0x95, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x1c,
+	0x04, 0x66, 0x0b, 0xf9, 0x73, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7,
+	0x15, 0x4b, 0xb0, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xe9, 0xea, 0xa1, 0xfb, 0x45, 0x0f, 0xe1, 0x62,
+	0x3d, 0x47, 0x84, 0x7a, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0x13, 0xa4, 0xec, 0xb8, 0x04,
+	0xd0, 0x15, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x07, 0x62, 0x0a, 0x89, 0x70,
+	0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x42, 0x7c, 0x15, 0x04, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a,
+	0x79, 0x9d, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f,
+	0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x80, 0xf8, 0xd8, 0xb1,
+	0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0xe0, 0x30, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x22,
+	0x8a, 0x20, 0x4a, 0xda, 0x01, 0x00, 0x00,
+}
+
 func (m *Descriptor) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -92,6 +137,26 @@
 		i++
 		i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_))
 	}
+	if len(m.Annotations) > 0 {
+		for k, _ := range m.Annotations {
+			dAtA[i] = 0x2a
+			i++
+			v := m.Annotations[k]
+			mapSize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v)))
+			i = encodeVarintDescriptor(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintDescriptor(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintDescriptor(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -105,6 +170,9 @@
 	return offset + 1
 }
 func (m *Descriptor) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.MediaType)
@@ -118,6 +186,17 @@
 	if m.Size_ != 0 {
 		n += 1 + sovDescriptor(uint64(m.Size_))
 	}
+	if len(m.Annotations) > 0 {
+		for k, v := range m.Annotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v)))
+			n += mapEntrySize + 1 + sovDescriptor(uint64(mapEntrySize))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -138,10 +217,22 @@
 	if this == nil {
 		return "nil"
 	}
+	keysForAnnotations := make([]string, 0, len(this.Annotations))
+	for k, _ := range this.Annotations {
+		keysForAnnotations = append(keysForAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+	mapStringForAnnotations := "map[string]string{"
+	for _, k := range keysForAnnotations {
+		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+	}
+	mapStringForAnnotations += "}"
 	s := strings.Join([]string{`&Descriptor{`,
 		`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
 		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
 		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
+		`Annotations:` + mapStringForAnnotations + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -169,7 +260,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -197,7 +288,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -207,6 +298,9 @@
 				return ErrInvalidLengthDescriptor
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthDescriptor
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -226,7 +320,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -236,6 +330,9 @@
 				return ErrInvalidLengthDescriptor
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthDescriptor
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -255,11 +352,138 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Size_ |= (int64(b) & 0x7F) << shift
+				m.Size_ |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDescriptor
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDescriptor
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthDescriptor
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Annotations == nil {
+				m.Annotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowDescriptor
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDescriptor
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthDescriptor
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthDescriptor
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDescriptor
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthDescriptor
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthDescriptor
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipDescriptor(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthDescriptor
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Annotations[mapkey] = mapvalue
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipDescriptor(dAtA[iNdEx:])
@@ -269,9 +493,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthDescriptor
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthDescriptor
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -335,10 +563,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthDescriptor
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthDescriptor
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -367,6 +598,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthDescriptor
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -385,26 +619,3 @@
 	ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowDescriptor   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptorDescriptor)
-}
-
-var fileDescriptorDescriptor = []byte{
-	// 234 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
-	0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20,
-	0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88,
-	0x3a, 0xa5, 0x6e, 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x66, 0x21, 0x59, 0x2e, 0xae, 0xdc, 0xd4, 0x94,
-	0xcc, 0xc4, 0x78, 0x90, 0x1e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0xb0, 0x48, 0x48,
-	0x65, 0x41, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, 0x71, 0x89, 0x04, 0x13, 0x48,
-	0xca, 0xc9, 0xe8, 0xc4, 0x3d, 0x79, 0x86, 0x5b, 0xf7, 0xe4, 0xb5, 0x90, 0x9c, 0x9a, 0x5f, 0x90,
-	0x9a, 0x07, 0xb7, 0xbc, 0x58, 0x3f, 0x3d, 0x5f, 0x17, 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41,
-	0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e,
-	0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c,
-	0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40,
-	0x7c, 0x60, 0x58, 0x83, 0xc9, 0x08, 0x86, 0x24, 0x36, 0xb0, 0x17, 0x8d, 0x01, 0x01, 0x00, 0x00,
-	0xff, 0xff, 0xea, 0xac, 0x78, 0x9a, 0x49, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/vendor/github.com/containerd/containerd/api/types/descriptor.proto
index 5c00dca..6d90a16 100644
--- a/vendor/github.com/containerd/containerd/api/types/descriptor.proto
+++ b/vendor/github.com/containerd/containerd/api/types/descriptor.proto
@@ -15,4 +15,5 @@
 	string media_type = 1;
 	string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
 	int64 size = 3;
+	map<string, string> annotations = 5;
 }
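
With field 5 added to the Descriptor message above, the regenerated Go type carries an Annotations map[string]string alongside the existing media type, digest, and size. The following is a hypothetical usage sketch, not part of this patch, assuming the updated vendored package; the annotation key and values are made up.

package main

import (
	"fmt"

	"github.com/containerd/containerd/api/types"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Illustration only: populate the new Annotations field on the
	// regenerated Descriptor type.
	desc := &types.Descriptor{
		MediaType:   "application/vnd.oci.image.manifest.v1+json",
		Digest:      digest.FromString("example content"),
		Size_:       1024,
		Annotations: map[string]string{"org.example.note": "backported"},
	}
	fmt.Println(desc.MediaType, desc.Annotations["org.example.note"])
}
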
diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
index 52e9f40..c231d34 100644
--- a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
@@ -3,22 +3,17 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import _ "github.com/gogo/protobuf/types"
-
-import time "time"
-
-import types1 "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -26,19 +21,82 @@
 var _ = math.Inf
 var _ = time.Kitchen
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 type Metric struct {
-	Timestamp time.Time             `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
-	ID        string                `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
-	Data      *google_protobuf1.Any `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+	Timestamp            time.Time  `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
+	ID                   string     `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+	Data                 *types.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
 }
 
-func (m *Metric) Reset()                    { *m = Metric{} }
-func (*Metric) ProtoMessage()               {}
-func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} }
+func (m *Metric) Reset()      { *m = Metric{} }
+func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d594d87edf6e6bc, []int{0}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Metric) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metric.Merge(m, src)
+}
+func (m *Metric) XXX_Size() int {
+	return m.Size()
+}
+func (m *Metric) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Metric)(nil), "containerd.types.Metric")
 }
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptor_8d594d87edf6e6bc)
+}
+
+var fileDescriptor_8d594d87edf6e6bc = []byte{
+	// 258 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96,
+	0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81,
+	0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64,
+	0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09,
+	0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50,
+	0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54,
+	0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70,
+	0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e,
+	0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c,
+	0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1,
+	0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c,
+	0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48,
+	0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24,
+	0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01,
+	0x00, 0x00,
+}
+
 func (m *Metric) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -56,8 +114,8 @@
 	_ = l
 	dAtA[i] = 0xa
 	i++
-	i = encodeVarintMetrics(dAtA, i, uint64(types1.SizeOfStdTime(m.Timestamp)))
-	n1, err := types1.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+	i = encodeVarintMetrics(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
@@ -78,6 +136,9 @@
 		}
 		i += n2
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -91,9 +152,12 @@
 	return offset + 1
 }
 func (m *Metric) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
-	l = types1.SizeOfStdTime(m.Timestamp)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
 	n += 1 + l + sovMetrics(uint64(l))
 	l = len(m.ID)
 	if l > 0 {
@@ -103,6 +167,9 @@
 		l = m.Data.Size()
 		n += 1 + l + sovMetrics(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -124,9 +191,10 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&Metric{`,
-		`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
-		`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "types.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -154,7 +222,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -182,7 +250,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -191,10 +259,13 @@
 				return ErrInvalidLengthMetrics
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthMetrics
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types1.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -212,7 +283,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -222,6 +293,9 @@
 				return ErrInvalidLengthMetrics
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMetrics
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -241,7 +315,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -250,11 +324,14 @@
 				return ErrInvalidLengthMetrics
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthMetrics
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Data == nil {
-				m.Data = &google_protobuf1.Any{}
+				m.Data = &types.Any{}
 			}
 			if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -269,9 +346,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthMetrics
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthMetrics
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -335,10 +416,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthMetrics
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthMetrics
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -367,6 +451,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthMetrics
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -385,28 +472,3 @@
 	ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowMetrics   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptorMetrics)
-}
-
-var fileDescriptorMetrics = []byte{
-	// 258 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96,
-	0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81,
-	0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64,
-	0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09,
-	0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50,
-	0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54,
-	0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70,
-	0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e,
-	0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c,
-	0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1,
-	0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c,
-	0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48,
-	0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24,
-	0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01,
-	0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go
index f7a9c3c..54af8ea 100644
--- a/vendor/github.com/containerd/containerd/api/types/mount.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go
@@ -3,22 +3,26 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 // Mount describes mounts for a container.
 //
 // This type is the lingua franca of ContainerD. All services provide mounts
@@ -35,16 +39,69 @@
 	// Target path in container
 	Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
 	// Options specifies zero or more fstab style mount options.
-	Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"`
+	Options              []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *Mount) Reset()                    { *m = Mount{} }
-func (*Mount) ProtoMessage()               {}
-func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
+func (m *Mount) Reset()      { *m = Mount{} }
+func (*Mount) ProtoMessage() {}
+func (*Mount) Descriptor() ([]byte, []int) {
+	return fileDescriptor_920196890d4a7b9f, []int{0}
+}
+func (m *Mount) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Mount.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Mount) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Mount.Merge(m, src)
+}
+func (m *Mount) XXX_Size() int {
+	return m.Size()
+}
+func (m *Mount) XXX_DiscardUnknown() {
+	xxx_messageInfo_Mount.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Mount proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Mount)(nil), "containerd.types.Mount")
 }
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptor_920196890d4a7b9f)
+}
+
+var fileDescriptor_920196890d4a7b9f = []byte{
+	// 202 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
+	0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
+	0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
+	0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
+	0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
+	0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
+	0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
+	0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
+	0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
+	0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00,
+	0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00,
+}
+
 func (m *Mount) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -93,6 +150,9 @@
 			i += copy(dAtA[i:], s)
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -106,6 +166,9 @@
 	return offset + 1
 }
 func (m *Mount) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Type)
@@ -126,6 +189,9 @@
 			n += 1 + l + sovMount(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -151,6 +217,7 @@
 		`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
 		`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
 		`Options:` + fmt.Sprintf("%v", this.Options) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -178,7 +245,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -206,7 +273,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -216,6 +283,9 @@
 				return ErrInvalidLengthMount
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMount
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -235,7 +305,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -245,6 +315,9 @@
 				return ErrInvalidLengthMount
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMount
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -264,7 +337,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -274,6 +347,9 @@
 				return ErrInvalidLengthMount
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMount
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -293,7 +369,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -303,6 +379,9 @@
 				return ErrInvalidLengthMount
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMount
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -317,9 +396,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthMount
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthMount
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -383,10 +466,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthMount
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthMount
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -415,6 +501,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthMount
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -433,24 +522,3 @@
 	ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowMount   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount)
-}
-
-var fileDescriptorMount = []byte{
-	// 202 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
-	0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
-	0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
-	0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
-	0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
-	0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
-	0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
-	0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
-	0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
-	0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00,
-	0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
index ba9a3bf..558f947 100644
--- a/vendor/github.com/containerd/containerd/api/types/platform.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
@@ -3,37 +3,94 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 // Platform follows the structure of the OCI platform specification, from
 // descriptors.
 type Platform struct {
-	OS           string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
-	Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
-	Variant      string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
+	OS                   string   `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
+	Architecture         string   `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
+	Variant              string   `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *Platform) Reset()                    { *m = Platform{} }
-func (*Platform) ProtoMessage()               {}
-func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorPlatform, []int{0} }
+func (m *Platform) Reset()      { *m = Platform{} }
+func (*Platform) ProtoMessage() {}
+func (*Platform) Descriptor() ([]byte, []int) {
+	return fileDescriptor_24ba7a4b83e2367e, []int{0}
+}
+func (m *Platform) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Platform.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Platform) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Platform.Merge(m, src)
+}
+func (m *Platform) XXX_Size() int {
+	return m.Size()
+}
+func (m *Platform) XXX_DiscardUnknown() {
+	xxx_messageInfo_Platform.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Platform proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Platform)(nil), "containerd.types.Platform")
 }
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptor_24ba7a4b83e2367e)
+}
+
+var fileDescriptor_24ba7a4b83e2367e = []byte{
+	// 205 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24,
+	0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9,
+	0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5,
+	0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46,
+	0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94,
+	0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98,
+	0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a,
+	0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31,
+	0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
+	0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18,
+	0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00,
+}
+
 func (m *Platform) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -67,6 +124,9 @@
 		i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant)))
 		i += copy(dAtA[i:], m.Variant)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -80,6 +140,9 @@
 	return offset + 1
 }
 func (m *Platform) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.OS)
@@ -94,6 +157,9 @@
 	if l > 0 {
 		n += 1 + l + sovPlatform(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -118,6 +184,7 @@
 		`OS:` + fmt.Sprintf("%v", this.OS) + `,`,
 		`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
 		`Variant:` + fmt.Sprintf("%v", this.Variant) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -145,7 +212,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -173,7 +240,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -183,6 +250,9 @@
 				return ErrInvalidLengthPlatform
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthPlatform
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -202,7 +272,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -212,6 +282,9 @@
 				return ErrInvalidLengthPlatform
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthPlatform
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -231,7 +304,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -241,6 +314,9 @@
 				return ErrInvalidLengthPlatform
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthPlatform
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -255,9 +331,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthPlatform
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthPlatform
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -321,10 +401,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthPlatform
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthPlatform
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -353,6 +436,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthPlatform
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -371,24 +457,3 @@
 	ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowPlatform   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptorPlatform)
-}
-
-var fileDescriptorPlatform = []byte{
-	// 205 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24,
-	0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9,
-	0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5,
-	0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46,
-	0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94,
-	0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98,
-	0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a,
-	0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31,
-	0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
-	0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18,
-	0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
index 437abe8..69d851c 100644
--- a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
@@ -1,34 +1,19 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/types/task/task.proto
 
-/*
-	Package task is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/api/types/task/task.proto
-
-	It has these top-level messages:
-		Process
-		ProcessInfo
-*/
 package task
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import _ "github.com/gogo/protobuf/types"
-import google_protobuf2 "github.com/gogo/protobuf/types"
-
-import time "time"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -61,6 +46,7 @@
 	4: "PAUSED",
 	5: "PAUSING",
 }
+
 var Status_value = map[string]int32{
 	"UNKNOWN": 0,
 	"CREATED": 1,
@@ -73,24 +59,58 @@
 func (x Status) String() string {
 	return proto.EnumName(Status_name, int32(x))
 }
-func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
 
-type Process struct {
-	ContainerID string    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
-	ID          string    `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
-	Pid         uint32    `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
-	Status      Status    `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
-	Stdin       string    `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
-	Stdout      string    `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
-	Stderr      string    `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
-	Terminal    bool      `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
-	ExitStatus  uint32    `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
-	ExitedAt    time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+func (Status) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_391ef18c8ab0dc16, []int{0}
 }
 
-func (m *Process) Reset()                    { *m = Process{} }
-func (*Process) ProtoMessage()               {}
-func (*Process) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
+type Process struct {
+	ContainerID          string    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ID                   string    `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+	Pid                  uint32    `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+	Status               Status    `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
+	Stdin                string    `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout               string    `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr               string    `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal             bool      `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	ExitStatus           uint32    `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt             time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Process) Reset()      { *m = Process{} }
+func (*Process) ProtoMessage() {}
+func (*Process) Descriptor() ([]byte, []int) {
+	return fileDescriptor_391ef18c8ab0dc16, []int{0}
+}
+func (m *Process) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Process) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Process.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Process) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Process.Merge(m, src)
+}
+func (m *Process) XXX_Size() int {
+	return m.Size()
+}
+func (m *Process) XXX_DiscardUnknown() {
+	xxx_messageInfo_Process.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Process proto.InternalMessageInfo
 
 type ProcessInfo struct {
 	// PID is the process ID.
@@ -98,18 +118,93 @@
 	// Info contains additional process information.
 	//
 	// Info varies by platform.
-	Info *google_protobuf2.Any `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"`
+	Info                 *types.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
 }
 
-func (m *ProcessInfo) Reset()                    { *m = ProcessInfo{} }
-func (*ProcessInfo) ProtoMessage()               {}
-func (*ProcessInfo) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} }
+func (m *ProcessInfo) Reset()      { *m = ProcessInfo{} }
+func (*ProcessInfo) ProtoMessage() {}
+func (*ProcessInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_391ef18c8ab0dc16, []int{1}
+}
+func (m *ProcessInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ProcessInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ProcessInfo.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ProcessInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProcessInfo.Merge(m, src)
+}
+func (m *ProcessInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *ProcessInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProcessInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProcessInfo proto.InternalMessageInfo
 
 func init() {
+	proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
 	proto.RegisterType((*Process)(nil), "containerd.v1.types.Process")
 	proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo")
-	proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
 }
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptor_391ef18c8ab0dc16)
+}
+
+var fileDescriptor_391ef18c8ab0dc16 = []byte{
+	// 545 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
+	0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
+	0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
+	0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
+	0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
+	0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
+	0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
+	0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
+	0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
+	0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
+	0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
+	0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
+	0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
+	0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
+	0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
+	0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
+	0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
+	0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
+	0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
+	0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
+	0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
+	0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
+	0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
+	0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
+	0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
+	0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
+	0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
+	0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
+	0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
+	0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
+	0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
+	0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
+	0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
+	0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
+	0x00,
+}
+
 func (m *Process) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -182,12 +277,15 @@
 	}
 	dAtA[i] = 0x52
 	i++
-	i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
-	n1, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n1
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -221,6 +319,9 @@
 		}
 		i += n2
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -234,6 +335,9 @@
 	return offset + 1
 }
 func (m *Process) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ContainerID)
@@ -268,12 +372,18 @@
 	if m.ExitStatus != 0 {
 		n += 1 + sovTask(uint64(m.ExitStatus))
 	}
-	l = types.SizeOfStdTime(m.ExitedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovTask(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ProcessInfo) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Pid != 0 {
@@ -283,6 +393,9 @@
 		l = m.Info.Size()
 		n += 1 + l + sovTask(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -313,7 +426,8 @@
 		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
 		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -324,7 +438,8 @@
 	}
 	s := strings.Join([]string{`&ProcessInfo{`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
-		`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "google_protobuf2.Any", 1) + `,`,
+		`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "types.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -352,7 +467,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -380,7 +495,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -390,6 +505,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -409,7 +527,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -419,6 +537,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -438,7 +559,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -457,7 +578,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Status |= (Status(b) & 0x7F) << shift
+				m.Status |= Status(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -476,7 +597,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -486,6 +607,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -505,7 +629,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -515,6 +639,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -534,7 +661,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -544,6 +671,9 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -563,7 +693,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -583,7 +713,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				m.ExitStatus |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -602,7 +732,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -611,10 +741,13 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -627,9 +760,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -654,7 +791,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -682,7 +819,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -701,7 +838,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -710,11 +847,14 @@
 				return ErrInvalidLengthTask
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthTask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Info == nil {
-				m.Info = &google_protobuf2.Any{}
+				m.Info = &types.Any{}
 			}
 			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -729,9 +869,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -795,10 +939,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthTask
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthTask
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -827,6 +974,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthTask
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -845,46 +995,3 @@
 	ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowTask   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptorTask)
-}
-
-var fileDescriptorTask = []byte{
-	// 545 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
-	0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
-	0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
-	0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
-	0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
-	0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
-	0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
-	0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
-	0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
-	0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
-	0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
-	0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
-	0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
-	0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
-	0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
-	0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
-	0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
-	0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
-	0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
-	0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
-	0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
-	0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
-	0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
-	0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
-	0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
-	0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
-	0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
-	0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
-	0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
-	0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
-	0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
-	0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
-	0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
-	0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
-	0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go
index 39adfd9..ff78f7e 100644
--- a/vendor/github.com/containerd/containerd/client.go
+++ b/vendor/github.com/containerd/containerd/client.go
@@ -136,6 +136,20 @@
 	if copts.services == nil && c.conn == nil {
 		return nil, errors.New("no grpc connection or services is available")
 	}
+
+	// check namespace labels for default runtime
+	if copts.defaultRuntime == "" && copts.defaultns != "" {
+		namespaces := c.NamespaceService()
+		ctx := context.Background()
+		if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
+			if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
+				c.runtime = defaultRuntime
+			}
+		} else {
+			return nil, err
+		}
+	}
+
 	return c, nil
 }
 
@@ -152,6 +166,20 @@
 		conn:    conn,
 		runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
 	}
+
+	// check namespace labels for default runtime
+	if copts.defaultRuntime == "" && copts.defaultns != "" {
+		namespaces := c.NamespaceService()
+		ctx := context.Background()
+		if labels, err := namespaces.Labels(ctx, copts.defaultns); err == nil {
+			if defaultRuntime, ok := labels[defaults.DefaultRuntimeNSLabel]; ok {
+				c.runtime = defaultRuntime
+			}
+		} else {
+			return nil, err
+		}
+	}
+
 	if copts.services != nil {
 		c.services = *copts.services
 	}
@@ -594,6 +622,13 @@
 	return versionservice.NewVersionClient(c.conn)
 }
 
+// Conn returns the underlying GRPC connection object
+func (c *Client) Conn() *grpc.ClientConn {
+	c.connMu.Lock()
+	defer c.connMu.Unlock()
+	return c.conn
+}
+
 // Version of containerd
 type Version struct {
 	// Version number
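
The client.go changes above teach New and NewWithConn to fall back to a per-namespace label when no default runtime was passed as a client option. A minimal sketch of exercising that path, assuming a local containerd socket; the socket path, namespace name, and runtime string are placeholders for illustration, not values taken from this patch:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
)

func main() {
	// Assumed socket path; adjust for the local installation.
	admin, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Create a namespace whose label names a default runtime
	// (the runtime string here is only an example).
	if err := admin.NamespaceService().Create(context.Background(), "example",
		map[string]string{defaults.DefaultRuntimeNSLabel: "io.containerd.runtime.v1.linux"}); err != nil {
		log.Fatal(err)
	}

	// A client constructed with this default namespace and no explicit
	// runtime option now resolves the runtime from the namespace label in New().
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}

Note that the lookup only runs when no runtime was configured explicitly (the patch guards on copts.defaultRuntime == ""), so a client option such as WithDefaultRuntime still takes precedence over the label.
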
diff --git a/vendor/github.com/containerd/containerd/container_checkpoint_opts.go b/vendor/github.com/containerd/containerd/container_checkpoint_opts.go
index 7d26142..5108636 100644
--- a/vendor/github.com/containerd/containerd/container_checkpoint_opts.go
+++ b/vendor/github.com/containerd/containerd/container_checkpoint_opts.go
@@ -70,10 +70,11 @@
 	for _, d := range task.Descriptors {
 		platformSpec := platforms.DefaultSpec()
 		index.Manifests = append(index.Manifests, imagespec.Descriptor{
-			MediaType: d.MediaType,
-			Size:      d.Size_,
-			Digest:    d.Digest,
-			Platform:  &platformSpec,
+			MediaType:   d.MediaType,
+			Size:        d.Size_,
+			Digest:      d.Digest,
+			Platform:    &platformSpec,
+			Annotations: d.Annotations,
 		})
 	}
 	// save copts
diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go
index de900f4..1ce9894 100644
--- a/vendor/github.com/containerd/containerd/container_opts.go
+++ b/vendor/github.com/containerd/containerd/container_opts.go
@@ -20,7 +20,9 @@
 	"context"
 
 	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/defaults"
 	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/oci"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/snapshots"
@@ -107,7 +109,7 @@
 // WithSnapshot uses an existing root filesystem for the container
 func WithSnapshot(id string) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		setSnapshotterIfEmpty(c)
+		setSnapshotterIfEmpty(ctx, client, c)
 		// check that the snapshot exists, if not, fail on creation
 		if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
 			return err
@@ -125,7 +127,7 @@
 		if err != nil {
 			return err
 		}
-		setSnapshotterIfEmpty(c)
+		setSnapshotterIfEmpty(ctx, client, c)
 		parent := identity.ChainID(diffIDs).String()
 		if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, opts...); err != nil {
 			return err
@@ -155,7 +157,7 @@
 		if err != nil {
 			return err
 		}
-		setSnapshotterIfEmpty(c)
+		setSnapshotterIfEmpty(ctx, client, c)
 		parent := identity.ChainID(diffIDs).String()
 		if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, opts...); err != nil {
 			return err
@@ -166,9 +168,18 @@
 	}
 }
 
-func setSnapshotterIfEmpty(c *containers.Container) {
+func setSnapshotterIfEmpty(ctx context.Context, client *Client, c *containers.Container) {
 	if c.Snapshotter == "" {
-		c.Snapshotter = DefaultSnapshotter
+		defaultSnapshotter := DefaultSnapshotter
+		namespaceService := client.NamespaceService()
+		if ns, err := namespaces.NamespaceRequired(ctx); err == nil {
+			if labels, err := namespaceService.Labels(ctx, ns); err == nil {
+				if snapshotLabel, ok := labels[defaults.DefaultSnapshotterNSLabel]; ok {
+					defaultSnapshotter = snapshotLabel
+				}
+			}
+		}
+		c.Snapshotter = defaultSnapshotter
 	}
 }
 
diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go
index 9e013f1..340a918 100644
--- a/vendor/github.com/containerd/containerd/container_opts_unix.go
+++ b/vendor/github.com/containerd/containerd/container_opts_unix.go
@@ -50,7 +50,7 @@
 			return err
 		}
 
-		setSnapshotterIfEmpty(c)
+		setSnapshotterIfEmpty(ctx, client, c)
 
 		var (
 			snapshotter = client.SnapshotService(c.Snapshotter)
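
Both call-site updates above feed the context and client into setSnapshotterIfEmpty so that, like the runtime, the snapshotter can be defaulted from a namespace label. A minimal sketch of a container created without an explicit snapshotter picking up that label, assuming the image has already been pulled and unpacked into the namespace; the namespace, image reference, and snapshotter name ("native") are illustrative:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Label the namespace with a default snapshotter ("native" is only an example).
	if err := client.NamespaceService().SetLabel(ctx, "example",
		defaults.DefaultSnapshotterNSLabel, "native"); err != nil {
		log.Fatal(err)
	}

	// Assumes this image was pulled (and unpacked) into the namespace beforehand.
	image, err := client.GetImage(ctx, "docker.io/library/alpine:latest")
	if err != nil {
		log.Fatal(err)
	}

	// No snapshotter is set on the container, so setSnapshotterIfEmpty consults
	// the namespace label before falling back to the compiled-in default.
	container, err := client.NewContainer(ctx, "example-container",
		containerd.WithNewSnapshot("example-snapshot", image),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer container.Delete(ctx, containerd.WithSnapshotCleanup)
}

Because the label lookup in setSnapshotterIfEmpty is best-effort (errors from the namespace check are ignored), an unlabeled namespace still falls back to DefaultSnapshotter exactly as before.
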
diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go
index 9967240..5503cb5 100644
--- a/vendor/github.com/containerd/containerd/content/local/store.go
+++ b/vendor/github.com/containerd/containerd/content/local/store.go
@@ -33,6 +33,7 @@
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/filters"
 	"github.com/containerd/containerd/log"
+	"github.com/sirupsen/logrus"
 
 	"github.com/containerd/continuity"
 	digest "github.com/opencontainers/go-digest"
@@ -477,6 +478,35 @@
 	return w, nil // lock is now held by w.
 }
 
+func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
+	path, _, data := s.ingestPaths(ref)
+	status, err := s.status(path)
+	if err != nil {
+		return status, errors.Wrap(err, "failed reading status of resume write")
+	}
+	if ref != status.Ref {
+		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
+		// layout corruption or a hash collision for the ref key.
+		return status, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
+	}
+
+	if total > 0 && status.Total > 0 && total != status.Total {
+		return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
+	}
+
+	// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
+	fp, err := os.Open(data)
+	if err != nil {
+		return status, err
+	}
+
+	p := bufPool.Get().(*[]byte)
+	status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
+	bufPool.Put(p)
+	fp.Close()
+	return status, err
+}
+
 // writer provides the main implementation of the Writer method. The caller
 // must hold the lock correctly and release on error if there is a problem.
 func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
@@ -498,45 +528,25 @@
 		updatedAt time.Time
 	)
 
+	foundValidIngest := false
 	// ensure that the ingest path has been created.
 	if err := os.Mkdir(path, 0755); err != nil {
 		if !os.IsExist(err) {
 			return nil, err
 		}
-
-		status, err := s.status(path)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed reading status of resume write")
+		status, err := s.resumeStatus(ref, total, digester)
+		if err == nil {
+			foundValidIngest = true
+			updatedAt = status.UpdatedAt
+			startedAt = status.StartedAt
+			total = status.Total
+			offset = status.Offset
+		} else {
+			logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
 		}
+	}
 
-		if ref != status.Ref {
-			// NOTE(stevvooe): This is fairly catastrophic. Either we have some
-			// layout corruption or a hash collision for the ref key.
-			return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
-		}
-
-		if total > 0 && status.Total > 0 && total != status.Total {
-			return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
-		}
-
-		// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
-		fp, err := os.Open(data)
-		if err != nil {
-			return nil, err
-		}
-
-		p := bufPool.Get().(*[]byte)
-		offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
-		bufPool.Put(p)
-		fp.Close()
-		if err != nil {
-			return nil, err
-		}
-
-		updatedAt = status.UpdatedAt
-		startedAt = status.StartedAt
-		total = status.Total
-	} else {
+	if !foundValidIngest {
 		startedAt = time.Now()
 		updatedAt = startedAt
 
@@ -546,11 +556,11 @@
 			return nil, err
 		}
 
-		if writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
+		if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
 			return nil, err
 		}
 
-		if writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
+		if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
 			return nil, err
 		}
 
diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go
index 223b145..3a94744 100644
--- a/vendor/github.com/containerd/containerd/content/local/writer.go
+++ b/vendor/github.com/containerd/containerd/content/local/writer.go
@@ -74,6 +74,9 @@
 }
 
 func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	// Ensure even on error the writer is fully closed
+	defer unlock(w.ref)
+
 	var base content.Info
 	for _, opt := range opts {
 		if err := opt(&base); err != nil {
@@ -81,8 +84,6 @@
 		}
 	}
 
-	// Ensure even on error the writer is fully closed
-	defer unlock(w.ref)
 	fp := w.fp
 	w.fp = nil
 
diff --git a/vendor/github.com/containerd/containerd/defaults/defaults.go b/vendor/github.com/containerd/containerd/defaults/defaults.go
index 7040f5b..3a748e4 100644
--- a/vendor/github.com/containerd/containerd/defaults/defaults.go
+++ b/vendor/github.com/containerd/containerd/defaults/defaults.go
@@ -23,4 +23,10 @@
 	// DefaultMaxSendMsgSize defines the default maximum message size for
 	// sending protobufs passed over the GRPC API.
 	DefaultMaxSendMsgSize = 16 << 20
+	// DefaultRuntimeNSLabel defines the namespace label to check for
+	// default runtime
+	DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
+	// DefaultSnapshotterNSLabel defines the namespace label to check for
+	// default snapshotter
+	DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
 )
diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go
index 983bf76..16f1048 100644
--- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go
+++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go
@@ -26,10 +26,10 @@
 var (
 	// DefaultRootDir is the default location used by containerd to store
 	// persistent data
-	DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root")
+	DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
 	// DefaultStateDir is the default location used by containerd to store
 	// transient data
-	DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state")
+	DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
 )
 
 const (
diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go
index 8d1219e..4d890ce 100644
--- a/vendor/github.com/containerd/containerd/diff.go
+++ b/vendor/github.com/containerd/containerd/diff.go
@@ -80,17 +80,19 @@
 
 func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
 	return ocispec.Descriptor{
-		MediaType: d.MediaType,
-		Digest:    d.Digest,
-		Size:      d.Size_,
+		MediaType:   d.MediaType,
+		Digest:      d.Digest,
+		Size:        d.Size_,
+		Annotations: d.Annotations,
 	}
 }
 
 func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
 	return &types.Descriptor{
-		MediaType: d.MediaType,
-		Digest:    d.Digest,
-		Size_:     d.Size,
+		MediaType:   d.MediaType,
+		Digest:      d.Digest,
+		Size_:       d.Size,
+		Annotations: d.Annotations,
 	}
 }
 
diff --git a/vendor/github.com/containerd/containerd/image_store.go b/vendor/github.com/containerd/containerd/image_store.go
index 3676bda..fd79e89 100644
--- a/vendor/github.com/containerd/containerd/image_store.go
+++ b/vendor/github.com/containerd/containerd/image_store.go
@@ -137,16 +137,18 @@
 
 func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
 	return ocispec.Descriptor{
-		MediaType: desc.MediaType,
-		Size:      desc.Size_,
-		Digest:    desc.Digest,
+		MediaType:   desc.MediaType,
+		Size:        desc.Size_,
+		Digest:      desc.Digest,
+		Annotations: desc.Annotations,
 	}
 }
 
 func descToProto(desc *ocispec.Descriptor) types.Descriptor {
 	return types.Descriptor{
-		MediaType: desc.MediaType,
-		Size_:     desc.Size,
-		Digest:    desc.Digest,
+		MediaType:   desc.MediaType,
+		Size_:       desc.Size,
+		Digest:      desc.Digest,
+		Annotations: desc.Annotations,
 	}
 }
diff --git a/vendor/github.com/containerd/containerd/metadata/adaptors.go b/vendor/github.com/containerd/containerd/metadata/adaptors.go
index 38539fd..4fd4c82 100644
--- a/vendor/github.com/containerd/containerd/metadata/adaptors.go
+++ b/vendor/github.com/containerd/containerd/metadata/adaptors.go
@@ -51,6 +51,8 @@
 			return checkMap(fieldpath[1:], obj.Labels)
 			// TODO(stevvooe): Greater/Less than filters would be awesome for
 			// size. Let's do it!
+		case "annotations":
+			return checkMap(fieldpath[1:], obj.Target.Annotations)
 		}
 
 		return "", false
diff --git a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go
index 1240188..94af315 100644
--- a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go
+++ b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go
@@ -24,15 +24,26 @@
 )
 
 var (
-	bucketKeyLabels    = []byte("labels")
-	bucketKeyCreatedAt = []byte("createdat")
-	bucketKeyUpdatedAt = []byte("updatedat")
+	bucketKeyAnnotations = []byte("annotations")
+	bucketKeyLabels      = []byte("labels")
+	bucketKeyCreatedAt   = []byte("createdat")
+	bucketKeyUpdatedAt   = []byte("updatedat")
 )
 
 // ReadLabels reads the labels key from the bucket
 // Uses the key "labels"
 func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) {
-	lbkt := bkt.Bucket(bucketKeyLabels)
+	return readMap(bkt, bucketKeyLabels)
+}
+
+// ReadAnnotations reads the OCI Descriptor Annotations key from the bucket
+// Uses the key "annotations"
+func ReadAnnotations(bkt *bolt.Bucket) (map[string]string, error) {
+	return readMap(bkt, bucketKeyAnnotations)
+}
+
+func readMap(bkt *bolt.Bucket, bucketName []byte) (map[string]string, error) {
+	lbkt := bkt.Bucket(bucketName)
 	if lbkt == nil {
 		return nil, nil
 	}
@@ -53,9 +64,18 @@
 // bucket. Typically, this removes zero-value entries.
 // Uses the key "labels"
 func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error {
+	return writeMap(bkt, bucketKeyLabels, labels)
+}
+
+// WriteAnnotations writes the OCI Descriptor Annotations
+func WriteAnnotations(bkt *bolt.Bucket, labels map[string]string) error {
+	return writeMap(bkt, bucketKeyAnnotations, labels)
+}
+
+func writeMap(bkt *bolt.Bucket, bucketName []byte, labels map[string]string) error {
 	// Remove existing labels to keep from merging
-	if lbkt := bkt.Bucket(bucketKeyLabels); lbkt != nil {
-		if err := bkt.DeleteBucket(bucketKeyLabels); err != nil {
+	if lbkt := bkt.Bucket(bucketName); lbkt != nil {
+		if err := bkt.DeleteBucket(bucketName); err != nil {
 			return err
 		}
 	}
@@ -64,7 +84,7 @@
 		return nil
 	}
 
-	lbkt, err := bkt.CreateBucket(bucketKeyLabels)
+	lbkt, err := bkt.CreateBucket(bucketName)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go
index 762f6fb..1dda753 100644
--- a/vendor/github.com/containerd/containerd/metadata/images.go
+++ b/vendor/github.com/containerd/containerd/metadata/images.go
@@ -192,6 +192,14 @@
 					key := strings.TrimPrefix(path, "labels.")
 					updated.Labels[key] = image.Labels[key]
 					continue
+				} else if strings.HasPrefix(path, "annotations.") {
+					if updated.Target.Annotations == nil {
+						updated.Target.Annotations = map[string]string{}
+					}
+
+					key := strings.TrimPrefix(path, "annotations.")
+					updated.Target.Annotations[key] = image.Target.Annotations[key]
+					continue
 				}
 
 				switch path {
@@ -204,6 +212,8 @@
 					// make sense to modify the size or digest without touching the
 					// mediatype, as well, for example.
 					updated.Target = image.Target
+				case "annotations":
+					updated.Target.Annotations = image.Target.Annotations
 				default:
 					return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on image %q", path, image.Name)
 				}
@@ -298,6 +308,11 @@
 	}
 	image.Labels = labels
 
+	image.Target.Annotations, err = boltutil.ReadAnnotations(bkt)
+	if err != nil {
+		return err
+	}
+
 	tbkt := bkt.Bucket(bucketKeyTarget)
 	if tbkt == nil {
 		return errors.New("unable to read target bucket")
@@ -331,6 +346,10 @@
 		return errors.Wrapf(err, "writing labels for image %v", image.Name)
 	}
 
+	if err := boltutil.WriteAnnotations(bkt, image.Target.Annotations); err != nil {
+		return errors.Wrapf(err, "writing Annotations for image %v", image.Name)
+	}
+
 	// write the target bucket
 	tbkt, err := bkt.CreateBucketIfNotExists(bucketKeyTarget)
 	if err != nil {
diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go
index 369d045..a7407c5 100644
--- a/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go
+++ b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go
@@ -25,6 +25,8 @@
 	"os"
 	"strconv"
 	"strings"
+
+	"github.com/pkg/errors"
 )
 
 // Self retrieves a list of mounts for the current running process.
@@ -41,13 +43,15 @@
 func parseInfoFile(r io.Reader) ([]Info, error) {
 	s := bufio.NewScanner(r)
 	out := []Info{}
-
+	var err error
 	for s.Scan() {
-		if err := s.Err(); err != nil {
+		if err = s.Err(); err != nil {
 			return nil, err
 		}
 
 		/*
+		   See http://man7.org/linux/man-pages/man5/proc.5.html
+
 		   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
 		   (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
 		   (1) mount ID:  unique identifier of the mount (may be reused after umount)
@@ -68,7 +72,7 @@
 		numFields := len(fields)
 		if numFields < 10 {
 			// should be at least 10 fields
-			return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields)
+			return nil, errors.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields)
 		}
 		p := Info{}
 		// ignore any numbers parsing errors, as there should not be any
@@ -76,13 +80,19 @@
 		p.Parent, _ = strconv.Atoi(fields[1])
 		mm := strings.Split(fields[2], ":")
 		if len(mm) != 2 {
-			return nil, fmt.Errorf("parsing '%s' failed: unexpected minor:major pair %s", text, mm)
+			return nil, errors.Errorf("parsing '%s' failed: unexpected minor:major pair %s", text, mm)
 		}
 		p.Major, _ = strconv.Atoi(mm[0])
 		p.Minor, _ = strconv.Atoi(mm[1])
 
-		p.Root = fields[3]
-		p.Mountpoint = fields[4]
+		p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
+		if err != nil {
+			return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote root field", fields[3])
+		}
+		p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
+		if err != nil {
+			return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote mount point field", fields[4])
+		}
 		p.Options = fields[5]
 
 		// one or more optional fields, when a separator (-)
@@ -101,11 +111,11 @@
 			}
 		}
 		if i == numFields {
-			return nil, fmt.Errorf("parsing '%s' failed: missing separator ('-')", text)
+			return nil, errors.Errorf("parsing '%s' failed: missing separator ('-')", text)
 		}
 		// There should be 3 fields after the separator...
 		if i+4 > numFields {
-			return nil, fmt.Errorf("parsing '%s' failed: not enough fields after a separator", text)
+			return nil, errors.Errorf("parsing '%s' failed: not enough fields after a separator", text)
 		}
 		// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
 		// (like "//serv/My Documents") _may_ end up having a space in the last field
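parseInfoFile now passes the root and mount point fields through strconv.Unquote, so octal escapes the kernel writes into /proc/<pid>/mountinfo (for example \040 for a space) are decoded instead of being returned verbatim. A standalone sketch of that decoding step, using a made-up field value:

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        // /proc/<pid>/mountinfo escapes spaces and similar characters as octal,
        // e.g. a mount point of "/mnt/my docs" appears as "/mnt/my\040docs".
        field := `/mnt/my\040docs` // illustrative raw field from mountinfo

        // Wrapping the field in quotes lets strconv.Unquote interpret the octal
        // escape sequences, mirroring what parseInfoFile now does.
        mountpoint, err := strconv.Unquote(`"` + field + `"`)
        if err != nil {
            fmt.Println("unquote failed:", err)
            return
        }
        fmt.Println(mountpoint) // prints: /mnt/my docs
    }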
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go
index 0769d51..ce75610 100644
--- a/vendor/github.com/containerd/containerd/oci/spec_opts.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go
@@ -741,7 +741,9 @@
 }
 
 // WithAllCapabilities sets all linux capabilities for the process
-var WithAllCapabilities = WithCapabilities(GetAllCapabilities())
+var WithAllCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
+	return WithCapabilities(GetAllCapabilities())(ctx, client, c, s)
+}
 
 // GetAllCapabilities returns all caps up to CAP_LAST_CAP
 // or CAP_BLOCK_SUSPEND on RHEL6
@@ -771,11 +773,14 @@
 }
 
 func removeCap(caps *[]string, s string) {
-	for i, c := range *caps {
+	var newcaps []string
+	for _, c := range *caps {
 		if c == s {
-			*caps = append((*caps)[:i], (*caps)[i+1:]...)
+			continue
 		}
+		newcaps = append(newcaps, c)
 	}
+	*caps = newcaps
 }
 
 // WithAddedCapabilities adds the provided capabilities
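Two behavioural fixes land in spec_opts.go: WithAllCapabilities is now a function value, so GetAllCapabilities runs when the option is applied rather than at package initialization, and removeCap rebuilds the slice so every occurrence of a capability is dropped (the previous in-place append-based deletion could skip an element that shifted into the removed index when the same capability appeared twice in a row). A small self-contained sketch of the new removeCap behaviour:

    package main

    import "fmt"

    // removeCap mirrors the fixed helper: it copies every entry except the
    // capability being removed into a fresh slice.
    func removeCap(caps *[]string, s string) {
        var newcaps []string
        for _, c := range *caps {
            if c == s {
                continue
            }
            newcaps = append(newcaps, c)
        }
        *caps = newcaps
    }

    func main() {
        caps := []string{"CAP_CHOWN", "CAP_SYS_ADMIN", "CAP_SYS_ADMIN", "CAP_KILL"}
        removeCap(&caps, "CAP_SYS_ADMIN")
        fmt.Println(caps) // [CAP_CHOWN CAP_KILL] — both duplicates are gone
    }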
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go
index 5e69145..9ae8bbe 100644
--- a/vendor/github.com/containerd/containerd/plugin/plugin.go
+++ b/vendor/github.com/containerd/containerd/plugin/plugin.go
@@ -20,6 +20,7 @@
 	"fmt"
 	"sync"
 
+	"github.com/containerd/ttrpc"
 	"github.com/pkg/errors"
 	"google.golang.org/grpc"
 )
@@ -123,6 +124,16 @@
 	Register(*grpc.Server) error
 }
 
+// TTRPCService allows TTRPC services to be registered with the underlying server
+type TTRPCService interface {
+	RegisterTTRPC(*ttrpc.Server) error
+}
+
+// TCPService allows GRPC services to be registered with the underlying tcp server
+type TCPService interface {
+	RegisterTCP(*grpc.Server) error
+}
+
 var register = struct {
 	sync.RWMutex
 	r []*Registration
diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go
index 42f3b84..14732d9 100644
--- a/vendor/github.com/containerd/containerd/process.go
+++ b/vendor/github.com/containerd/containerd/process.go
@@ -52,6 +52,15 @@
 	Status(context.Context) (Status, error)
 }
 
+// NewExitStatus populates an ExitStatus
+func NewExitStatus(code uint32, t time.Time, err error) *ExitStatus {
+	return &ExitStatus{
+		code:     code,
+		exitedAt: t,
+		err:      err,
+	}
+}
+
 // ExitStatus encapsulates a process' exit status.
 // It is used by `Wait()` to return either a process exit code or an error
 type ExitStatus struct {
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
index 4a2ce3c..6f06b0e 100644
--- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
+++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
@@ -18,6 +18,7 @@
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -28,6 +29,7 @@
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/log"
+	"github.com/docker/distribution/registry/api/errcode"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -101,12 +103,16 @@
 		// really distinguish between a 206 and a 200. In the case of 200, we
 		// can discard the bytes, hiding the seek behavior from the
 		// implementation.
+		defer resp.Body.Close()
 
-		resp.Body.Close()
 		if resp.StatusCode == http.StatusNotFound {
 			return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u)
 		}
-		return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+		var registryErr errcode.Errors
+		if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
+			return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+		}
+		return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error())
 	}
 	if offset > 0 {
 		cr := resp.Header.Get("content-range")
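On an unexpected status code the fetcher now attempts to decode the response body as a docker/distribution errcode.Errors value and, when that succeeds, appends the server-supplied message to the returned error instead of reporting only the status line. A minimal sketch of that decoding, with a hypothetical registry error payload:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"

        "github.com/docker/distribution/registry/api/errcode"
    )

    func main() {
        // Hypothetical body returned by a registry alongside a 404 status.
        body := `{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown"}]}`

        var registryErr errcode.Errors
        if err := json.NewDecoder(strings.NewReader(body)).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
            fmt.Println("unexpected status code")
            return
        }
        // The fetcher surfaces this alongside the HTTP status it received.
        fmt.Println("server message:", registryErr.Error())
    }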
diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go
index a9b2b78..914d351 100644
--- a/vendor/github.com/containerd/containerd/remotes/resolver.go
+++ b/vendor/github.com/containerd/containerd/remotes/resolver.go
@@ -72,9 +72,9 @@
 
 // PusherFunc allows package users to implement a Pusher with just a
 // function.
-type PusherFunc func(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error
+type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error)
 
 // Push content
-func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error {
-	return fn(ctx, desc, r)
+func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
+	return fn(ctx, desc)
 }
diff --git a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go
index b6b5738..96dfebe 100644
--- a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go
+++ b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go
@@ -1,30 +1,16 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/runtime/linux/runctypes/runc.proto
 
-/*
-	Package runctypes is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/runtime/linux/runctypes/runc.proto
-
-	It has these top-level messages:
-		RuncOptions
-		CreateOptions
-		CheckpointOptions
-		ProcessDetails
-*/
 package runctypes
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -38,59 +24,183 @@
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type RuncOptions struct {
-	Runtime       string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"`
-	RuntimeRoot   string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"`
-	CriuPath      string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"`
-	SystemdCgroup bool   `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"`
+	Runtime              string   `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"`
+	RuntimeRoot          string   `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"`
+	CriuPath             string   `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"`
+	SystemdCgroup        bool     `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *RuncOptions) Reset()                    { *m = RuncOptions{} }
-func (*RuncOptions) ProtoMessage()               {}
-func (*RuncOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{0} }
+func (m *RuncOptions) Reset()      { *m = RuncOptions{} }
+func (*RuncOptions) ProtoMessage() {}
+func (*RuncOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d20e2ba8b3cc58b9, []int{0}
+}
+func (m *RuncOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RuncOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RuncOptions.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RuncOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RuncOptions.Merge(m, src)
+}
+func (m *RuncOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *RuncOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_RuncOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RuncOptions proto.InternalMessageInfo
 
 type CreateOptions struct {
-	NoPivotRoot         bool     `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"`
-	OpenTcp             bool     `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
-	ExternalUnixSockets bool     `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
-	Terminal            bool     `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
-	FileLocks           bool     `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
-	EmptyNamespaces     []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
-	CgroupsMode         string   `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
-	NoNewKeyring        bool     `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"`
-	ShimCgroup          string   `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"`
-	IoUid               uint32   `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"`
-	IoGid               uint32   `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"`
-	CriuWorkPath        string   `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
-	CriuImagePath       string   `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"`
+	NoPivotRoot          bool     `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"`
+	OpenTcp              bool     `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
+	ExternalUnixSockets  bool     `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
+	Terminal             bool     `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	FileLocks            bool     `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
+	EmptyNamespaces      []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"`
+	CgroupsMode          string   `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
+	NoNewKeyring         bool     `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"`
+	ShimCgroup           string   `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"`
+	IoUid                uint32   `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"`
+	IoGid                uint32   `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"`
+	CriuWorkPath         string   `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
+	CriuImagePath        string   `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CreateOptions) Reset()                    { *m = CreateOptions{} }
-func (*CreateOptions) ProtoMessage()               {}
-func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{1} }
+func (m *CreateOptions) Reset()      { *m = CreateOptions{} }
+func (*CreateOptions) ProtoMessage() {}
+func (*CreateOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d20e2ba8b3cc58b9, []int{1}
+}
+func (m *CreateOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateOptions.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateOptions.Merge(m, src)
+}
+func (m *CreateOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateOptions proto.InternalMessageInfo
 
 type CheckpointOptions struct {
-	Exit                bool     `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"`
-	OpenTcp             bool     `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
-	ExternalUnixSockets bool     `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
-	Terminal            bool     `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
-	FileLocks           bool     `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
-	EmptyNamespaces     []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
-	CgroupsMode         string   `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
-	WorkPath            string   `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
-	ImagePath           string   `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"`
+	Exit                 bool     `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"`
+	OpenTcp              bool     `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
+	ExternalUnixSockets  bool     `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
+	Terminal             bool     `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	FileLocks            bool     `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
+	EmptyNamespaces      []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"`
+	CgroupsMode          string   `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
+	WorkPath             string   `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
+	ImagePath            string   `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CheckpointOptions) Reset()                    { *m = CheckpointOptions{} }
-func (*CheckpointOptions) ProtoMessage()               {}
-func (*CheckpointOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{2} }
+func (m *CheckpointOptions) Reset()      { *m = CheckpointOptions{} }
+func (*CheckpointOptions) ProtoMessage() {}
+func (*CheckpointOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d20e2ba8b3cc58b9, []int{2}
+}
+func (m *CheckpointOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CheckpointOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CheckpointOptions.Merge(m, src)
+}
+func (m *CheckpointOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *CheckpointOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_CheckpointOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CheckpointOptions proto.InternalMessageInfo
 
 type ProcessDetails struct {
-	ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ProcessDetails) Reset()                    { *m = ProcessDetails{} }
-func (*ProcessDetails) ProtoMessage()               {}
-func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{3} }
+func (m *ProcessDetails) Reset()      { *m = ProcessDetails{} }
+func (*ProcessDetails) ProtoMessage() {}
+func (*ProcessDetails) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d20e2ba8b3cc58b9, []int{3}
+}
+func (m *ProcessDetails) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ProcessDetails) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProcessDetails.Merge(m, src)
+}
+func (m *ProcessDetails) XXX_Size() int {
+	return m.Size()
+}
+func (m *ProcessDetails) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProcessDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*RuncOptions)(nil), "containerd.linux.runc.RuncOptions")
@@ -98,6 +208,53 @@
 	proto.RegisterType((*CheckpointOptions)(nil), "containerd.linux.runc.CheckpointOptions")
 	proto.RegisterType((*ProcessDetails)(nil), "containerd.linux.runc.ProcessDetails")
 }
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/runtime/linux/runctypes/runc.proto", fileDescriptor_d20e2ba8b3cc58b9)
+}
+
+var fileDescriptor_d20e2ba8b3cc58b9 = []byte{
+	// 604 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
+	0x10, 0xc6, 0xeb, 0xfe, 0x49, 0x9c, 0x49, 0xd2, 0xc2, 0x42, 0x25, 0xd3, 0xaa, 0x69, 0x08, 0x7f,
+	0x14, 0x2e, 0xa9, 0x04, 0xe2, 0xc4, 0xad, 0x29, 0x42, 0x15, 0x50, 0x2a, 0x43, 0x05, 0x42, 0x48,
+	0x2b, 0x77, 0x3d, 0x24, 0xab, 0xc4, 0x3b, 0x96, 0x77, 0x4d, 0x92, 0x1b, 0x4f, 0xc0, 0x0b, 0xf1,
+	0x02, 0x3d, 0x21, 0x8e, 0x9c, 0x10, 0xcd, 0x93, 0xa0, 0x5d, 0xc7, 0x69, 0xcf, 0x1c, 0xb9, 0xcd,
+	0xfc, 0xe6, 0xb3, 0x67, 0xf4, 0x7d, 0xb2, 0xa1, 0x3f, 0x90, 0x66, 0x98, 0x9f, 0xf7, 0x04, 0x25,
+	0x07, 0x82, 0x94, 0x89, 0xa4, 0xc2, 0x2c, 0xbe, 0x5e, 0x66, 0xb9, 0x32, 0x32, 0xc1, 0x83, 0xb1,
+	0x54, 0xf9, 0xd4, 0x76, 0xc2, 0xcc, 0x52, 0xd4, 0xae, 0xea, 0xa5, 0x19, 0x19, 0x62, 0xdb, 0x57,
+	0xf2, 0x9e, 0x93, 0xf5, 0xec, 0x70, 0xe7, 0xf6, 0x80, 0x06, 0xe4, 0x14, 0x07, 0xb6, 0x2a, 0xc4,
+	0x9d, 0x6f, 0x1e, 0xd4, 0xc3, 0x5c, 0x89, 0x37, 0xa9, 0x91, 0xa4, 0x34, 0x0b, 0xa0, 0xba, 0x58,
+	0x11, 0x78, 0x6d, 0xaf, 0x5b, 0x0b, 0xcb, 0x96, 0xdd, 0x85, 0xc6, 0xa2, 0xe4, 0x19, 0x91, 0x09,
+	0x56, 0xdd, 0xb8, 0xbe, 0x60, 0x21, 0x91, 0x61, 0xbb, 0x50, 0x13, 0x99, 0xcc, 0x79, 0x1a, 0x99,
+	0x61, 0xb0, 0xe6, 0xe6, 0xbe, 0x05, 0xa7, 0x91, 0x19, 0xb2, 0x07, 0xb0, 0xa9, 0x67, 0xda, 0x60,
+	0x12, 0x73, 0x31, 0xc8, 0x28, 0x4f, 0x83, 0xf5, 0xb6, 0xd7, 0xf5, 0xc3, 0xe6, 0x82, 0xf6, 0x1d,
+	0xec, 0xfc, 0x58, 0x83, 0x66, 0x3f, 0xc3, 0xc8, 0x60, 0x79, 0x52, 0x07, 0x9a, 0x8a, 0x78, 0x2a,
+	0xbf, 0x90, 0x29, 0x36, 0x7b, 0xee, 0xb9, 0xba, 0xa2, 0x53, 0xcb, 0xdc, 0xe6, 0x3b, 0xe0, 0x53,
+	0x8a, 0x8a, 0x1b, 0x91, 0xba, 0xc3, 0xfc, 0xb0, 0x6a, 0xfb, 0x77, 0x22, 0x65, 0x8f, 0x61, 0x1b,
+	0xa7, 0x06, 0x33, 0x15, 0x8d, 0x79, 0xae, 0xe4, 0x94, 0x6b, 0x12, 0x23, 0x34, 0xda, 0x1d, 0xe8,
+	0x87, 0xb7, 0xca, 0xe1, 0x99, 0x92, 0xd3, 0xb7, 0xc5, 0x88, 0xed, 0x80, 0x6f, 0x30, 0x4b, 0xa4,
+	0x8a, 0xc6, 0x8b, 0x2b, 0x97, 0x3d, 0xdb, 0x03, 0xf8, 0x2c, 0xc7, 0xc8, 0xc7, 0x24, 0x46, 0x3a,
+	0xd8, 0x70, 0xd3, 0x9a, 0x25, 0xaf, 0x2c, 0x60, 0x8f, 0xe0, 0x06, 0x26, 0xa9, 0x99, 0x71, 0x15,
+	0x25, 0xa8, 0xd3, 0x48, 0xa0, 0x0e, 0x2a, 0xed, 0xb5, 0x6e, 0x2d, 0xdc, 0x72, 0xfc, 0x64, 0x89,
+	0xad, 0xa3, 0x85, 0x13, 0x9a, 0x27, 0x14, 0x63, 0x50, 0x2d, 0x1c, 0x5d, 0xb0, 0xd7, 0x14, 0x23,
+	0xbb, 0x0f, 0x9b, 0x8a, 0xb8, 0xc2, 0x09, 0x1f, 0xe1, 0x2c, 0x93, 0x6a, 0x10, 0xf8, 0x6e, 0x61,
+	0x43, 0xd1, 0x09, 0x4e, 0x5e, 0x16, 0x8c, 0xed, 0x43, 0x5d, 0x0f, 0x65, 0x52, 0xfa, 0x5a, 0x73,
+	0xef, 0x01, 0x8b, 0x0a, 0x53, 0xd9, 0x36, 0x54, 0x24, 0xf1, 0x5c, 0xc6, 0x01, 0xb4, 0xbd, 0x6e,
+	0x33, 0xdc, 0x90, 0x74, 0x26, 0xe3, 0x05, 0x1e, 0xc8, 0x38, 0xa8, 0x97, 0xf8, 0x85, 0x8c, 0xed,
+	0x52, 0x17, 0xe3, 0x84, 0xb2, 0x51, 0x91, 0x65, 0xc3, 0xbd, 0xb1, 0x61, 0xe9, 0x7b, 0xca, 0x46,
+	0x2e, 0xcf, 0x87, 0xb0, 0xe5, 0x54, 0x32, 0x89, 0x06, 0x58, 0xc8, 0x9a, 0x4e, 0xd6, 0xb4, 0xf8,
+	0xd8, 0x52, 0xab, 0xeb, 0x7c, 0x5f, 0x85, 0x9b, 0xfd, 0x21, 0x8a, 0x51, 0x4a, 0x52, 0x99, 0x32,
+	0x54, 0x06, 0xeb, 0x38, 0x95, 0x65, 0x96, 0xae, 0xfe, 0x6f, 0x43, 0xdc, 0x85, 0xda, 0x95, 0x95,
+	0x7e, 0xf1, 0x59, 0x4c, 0x4a, 0x1b, 0xf7, 0x00, 0xae, 0x39, 0x58, 0x44, 0x57, 0x93, 0x4b, 0xf7,
+	0x9e, 0xc2, 0xe6, 0x69, 0x46, 0x02, 0xb5, 0x3e, 0x42, 0x13, 0xc9, 0xb1, 0x66, 0xf7, 0xa0, 0x8a,
+	0x53, 0x14, 0x5c, 0xc6, 0xc5, 0x17, 0x7a, 0x08, 0xf3, 0xdf, 0xfb, 0x95, 0xe7, 0x53, 0x14, 0xc7,
+	0x47, 0x61, 0xc5, 0x8e, 0x8e, 0xe3, 0xc3, 0x4f, 0x17, 0x97, 0xad, 0x95, 0x5f, 0x97, 0xad, 0x95,
+	0xaf, 0xf3, 0x96, 0x77, 0x31, 0x6f, 0x79, 0x3f, 0xe7, 0x2d, 0xef, 0xcf, 0xbc, 0xe5, 0x7d, 0x3c,
+	0xfc, 0xd7, 0x5f, 0xcc, 0xb3, 0x65, 0xf5, 0x61, 0xe5, 0xbc, 0xe2, 0xfe, 0x1e, 0x4f, 0xfe, 0x06,
+	0x00, 0x00, 0xff, 0xff, 0x7f, 0x24, 0x6f, 0x2e, 0xb1, 0x04, 0x00, 0x00,
+}
+
 func (m *RuncOptions) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -141,6 +298,9 @@
 		}
 		i++
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -268,6 +428,9 @@
 		i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuImagePath)))
 		i += copy(dAtA[i:], m.CriuImagePath)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -369,6 +532,9 @@
 		i = encodeVarintRunc(dAtA, i, uint64(len(m.ImagePath)))
 		i += copy(dAtA[i:], m.ImagePath)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -393,6 +559,9 @@
 		i = encodeVarintRunc(dAtA, i, uint64(len(m.ExecID)))
 		i += copy(dAtA[i:], m.ExecID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -406,6 +575,9 @@
 	return offset + 1
 }
 func (m *RuncOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Runtime)
@@ -423,10 +595,16 @@
 	if m.SystemdCgroup {
 		n += 2
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.NoPivotRoot {
@@ -475,10 +653,16 @@
 	if l > 0 {
 		n += 1 + l + sovRunc(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CheckpointOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Exit {
@@ -514,16 +698,25 @@
 	if l > 0 {
 		n += 1 + l + sovRunc(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ProcessDetails) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ExecID)
 	if l > 0 {
 		n += 1 + l + sovRunc(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -549,6 +742,7 @@
 		`RuntimeRoot:` + fmt.Sprintf("%v", this.RuntimeRoot) + `,`,
 		`CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`,
 		`SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -571,6 +765,7 @@
 		`IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`,
 		`CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`,
 		`CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -589,6 +784,7 @@
 		`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
 		`WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`,
 		`ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -599,6 +795,7 @@
 	}
 	s := strings.Join([]string{`&ProcessDetails{`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -626,7 +823,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -654,7 +851,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -664,6 +861,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -683,7 +883,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -693,6 +893,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -712,7 +915,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -722,6 +925,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -741,7 +947,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -756,9 +962,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthRunc
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -783,7 +993,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -811,7 +1021,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -831,7 +1041,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -851,7 +1061,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -871,7 +1081,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -891,7 +1101,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -911,7 +1121,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -921,6 +1131,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -940,7 +1153,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -950,6 +1163,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -969,7 +1185,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -989,7 +1205,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -999,6 +1215,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1018,7 +1237,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.IoUid |= (uint32(b) & 0x7F) << shift
+				m.IoUid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1037,7 +1256,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.IoGid |= (uint32(b) & 0x7F) << shift
+				m.IoGid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1056,7 +1275,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1066,6 +1285,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1085,7 +1307,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1095,6 +1317,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1109,9 +1334,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthRunc
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1136,7 +1365,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1164,7 +1393,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1184,7 +1413,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1204,7 +1433,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1224,7 +1453,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1244,7 +1473,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1264,7 +1493,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1274,6 +1503,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1293,7 +1525,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1303,6 +1535,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1322,7 +1557,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1332,6 +1567,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1351,7 +1589,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1361,6 +1599,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1375,9 +1616,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthRunc
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1402,7 +1647,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1430,7 +1675,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1440,6 +1685,9 @@
 				return ErrInvalidLengthRunc
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1454,9 +1702,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthRunc
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRunc
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1520,10 +1772,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthRunc
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRunc
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1552,6 +1807,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthRunc
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1570,49 +1828,3 @@
 	ErrInvalidLengthRunc = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowRunc   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/runtime/linux/runctypes/runc.proto", fileDescriptorRunc)
-}
-
-var fileDescriptorRunc = []byte{
-	// 604 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
-	0x10, 0xc6, 0xeb, 0xfe, 0x49, 0x9c, 0x49, 0xd2, 0xc2, 0x42, 0x25, 0xd3, 0xaa, 0x69, 0x08, 0x7f,
-	0x14, 0x2e, 0xa9, 0x04, 0xe2, 0xc4, 0xad, 0x29, 0x42, 0x15, 0x50, 0x2a, 0x43, 0x05, 0x42, 0x48,
-	0x2b, 0x77, 0x3d, 0x24, 0xab, 0xc4, 0x3b, 0x96, 0x77, 0x4d, 0x92, 0x1b, 0x4f, 0xc0, 0x0b, 0xf1,
-	0x02, 0x3d, 0x21, 0x8e, 0x9c, 0x10, 0xcd, 0x93, 0xa0, 0x5d, 0xc7, 0x69, 0xcf, 0x1c, 0xb9, 0xcd,
-	0xfc, 0xe6, 0xb3, 0x67, 0xf4, 0x7d, 0xb2, 0xa1, 0x3f, 0x90, 0x66, 0x98, 0x9f, 0xf7, 0x04, 0x25,
-	0x07, 0x82, 0x94, 0x89, 0xa4, 0xc2, 0x2c, 0xbe, 0x5e, 0x66, 0xb9, 0x32, 0x32, 0xc1, 0x83, 0xb1,
-	0x54, 0xf9, 0xd4, 0x76, 0xc2, 0xcc, 0x52, 0xd4, 0xae, 0xea, 0xa5, 0x19, 0x19, 0x62, 0xdb, 0x57,
-	0xf2, 0x9e, 0x93, 0xf5, 0xec, 0x70, 0xe7, 0xf6, 0x80, 0x06, 0xe4, 0x14, 0x07, 0xb6, 0x2a, 0xc4,
-	0x9d, 0x6f, 0x1e, 0xd4, 0xc3, 0x5c, 0x89, 0x37, 0xa9, 0x91, 0xa4, 0x34, 0x0b, 0xa0, 0xba, 0x58,
-	0x11, 0x78, 0x6d, 0xaf, 0x5b, 0x0b, 0xcb, 0x96, 0xdd, 0x85, 0xc6, 0xa2, 0xe4, 0x19, 0x91, 0x09,
-	0x56, 0xdd, 0xb8, 0xbe, 0x60, 0x21, 0x91, 0x61, 0xbb, 0x50, 0x13, 0x99, 0xcc, 0x79, 0x1a, 0x99,
-	0x61, 0xb0, 0xe6, 0xe6, 0xbe, 0x05, 0xa7, 0x91, 0x19, 0xb2, 0x07, 0xb0, 0xa9, 0x67, 0xda, 0x60,
-	0x12, 0x73, 0x31, 0xc8, 0x28, 0x4f, 0x83, 0xf5, 0xb6, 0xd7, 0xf5, 0xc3, 0xe6, 0x82, 0xf6, 0x1d,
-	0xec, 0xfc, 0x58, 0x83, 0x66, 0x3f, 0xc3, 0xc8, 0x60, 0x79, 0x52, 0x07, 0x9a, 0x8a, 0x78, 0x2a,
-	0xbf, 0x90, 0x29, 0x36, 0x7b, 0xee, 0xb9, 0xba, 0xa2, 0x53, 0xcb, 0xdc, 0xe6, 0x3b, 0xe0, 0x53,
-	0x8a, 0x8a, 0x1b, 0x91, 0xba, 0xc3, 0xfc, 0xb0, 0x6a, 0xfb, 0x77, 0x22, 0x65, 0x8f, 0x61, 0x1b,
-	0xa7, 0x06, 0x33, 0x15, 0x8d, 0x79, 0xae, 0xe4, 0x94, 0x6b, 0x12, 0x23, 0x34, 0xda, 0x1d, 0xe8,
-	0x87, 0xb7, 0xca, 0xe1, 0x99, 0x92, 0xd3, 0xb7, 0xc5, 0x88, 0xed, 0x80, 0x6f, 0x30, 0x4b, 0xa4,
-	0x8a, 0xc6, 0x8b, 0x2b, 0x97, 0x3d, 0xdb, 0x03, 0xf8, 0x2c, 0xc7, 0xc8, 0xc7, 0x24, 0x46, 0x3a,
-	0xd8, 0x70, 0xd3, 0x9a, 0x25, 0xaf, 0x2c, 0x60, 0x8f, 0xe0, 0x06, 0x26, 0xa9, 0x99, 0x71, 0x15,
-	0x25, 0xa8, 0xd3, 0x48, 0xa0, 0x0e, 0x2a, 0xed, 0xb5, 0x6e, 0x2d, 0xdc, 0x72, 0xfc, 0x64, 0x89,
-	0xad, 0xa3, 0x85, 0x13, 0x9a, 0x27, 0x14, 0x63, 0x50, 0x2d, 0x1c, 0x5d, 0xb0, 0xd7, 0x14, 0x23,
-	0xbb, 0x0f, 0x9b, 0x8a, 0xb8, 0xc2, 0x09, 0x1f, 0xe1, 0x2c, 0x93, 0x6a, 0x10, 0xf8, 0x6e, 0x61,
-	0x43, 0xd1, 0x09, 0x4e, 0x5e, 0x16, 0x8c, 0xed, 0x43, 0x5d, 0x0f, 0x65, 0x52, 0xfa, 0x5a, 0x73,
-	0xef, 0x01, 0x8b, 0x0a, 0x53, 0xd9, 0x36, 0x54, 0x24, 0xf1, 0x5c, 0xc6, 0x01, 0xb4, 0xbd, 0x6e,
-	0x33, 0xdc, 0x90, 0x74, 0x26, 0xe3, 0x05, 0x1e, 0xc8, 0x38, 0xa8, 0x97, 0xf8, 0x85, 0x8c, 0xed,
-	0x52, 0x17, 0xe3, 0x84, 0xb2, 0x51, 0x91, 0x65, 0xc3, 0xbd, 0xb1, 0x61, 0xe9, 0x7b, 0xca, 0x46,
-	0x2e, 0xcf, 0x87, 0xb0, 0xe5, 0x54, 0x32, 0x89, 0x06, 0x58, 0xc8, 0x9a, 0x4e, 0xd6, 0xb4, 0xf8,
-	0xd8, 0x52, 0xab, 0xeb, 0x7c, 0x5f, 0x85, 0x9b, 0xfd, 0x21, 0x8a, 0x51, 0x4a, 0x52, 0x99, 0x32,
-	0x54, 0x06, 0xeb, 0x38, 0x95, 0x65, 0x96, 0xae, 0xfe, 0x6f, 0x43, 0xdc, 0x85, 0xda, 0x95, 0x95,
-	0x7e, 0xf1, 0x59, 0x4c, 0x4a, 0x1b, 0xf7, 0x00, 0xae, 0x39, 0x58, 0x44, 0x57, 0x93, 0x4b, 0xf7,
-	0x9e, 0xc2, 0xe6, 0x69, 0x46, 0x02, 0xb5, 0x3e, 0x42, 0x13, 0xc9, 0xb1, 0x66, 0xf7, 0xa0, 0x8a,
-	0x53, 0x14, 0x5c, 0xc6, 0xc5, 0x17, 0x7a, 0x08, 0xf3, 0xdf, 0xfb, 0x95, 0xe7, 0x53, 0x14, 0xc7,
-	0x47, 0x61, 0xc5, 0x8e, 0x8e, 0xe3, 0xc3, 0x4f, 0x17, 0x97, 0xad, 0x95, 0x5f, 0x97, 0xad, 0x95,
-	0xaf, 0xf3, 0x96, 0x77, 0x31, 0x6f, 0x79, 0x3f, 0xe7, 0x2d, 0xef, 0xcf, 0xbc, 0xe5, 0x7d, 0x3c,
-	0xfc, 0xd7, 0x5f, 0xcc, 0xb3, 0x65, 0xf5, 0x61, 0xe5, 0xbc, 0xe2, 0xfe, 0x1e, 0x4f, 0xfe, 0x06,
-	0x00, 0x00, 0xff, 0xff, 0x7f, 0x24, 0x6f, 0x2e, 0xb1, 0x04, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go
index b37c3c6..15f6ed8 100644
--- a/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go
+++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/proc/io.go
@@ -27,8 +27,10 @@
 	"os/exec"
 	"path/filepath"
 	"sync"
+	"sync/atomic"
 	"syscall"
 
+	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/runtime/proc"
 	"github.com/containerd/fifo"
@@ -122,7 +124,7 @@
 }
 
 func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
-	var sameFile io.WriteCloser
+	var sameFile *countingWriteCloser
 	for _, i := range []struct {
 		name string
 		dest func(wc io.WriteCloser, rc io.Closer)
@@ -136,7 +138,9 @@
 					cwg.Done()
 					p := bufPool.Get().(*[]byte)
 					defer bufPool.Put(p)
-					io.CopyBuffer(wc, rio.Stdout(), *p)
+					if _, err := io.CopyBuffer(wc, rio.Stdout(), *p); err != nil {
+						log.G(ctx).Warn("error copying stdout")
+					}
 					wg.Done()
 					wc.Close()
 					if rc != nil {
@@ -153,7 +157,9 @@
 					cwg.Done()
 					p := bufPool.Get().(*[]byte)
 					defer bufPool.Put(p)
-					io.CopyBuffer(wc, rio.Stderr(), *p)
+					if _, err := io.CopyBuffer(wc, rio.Stderr(), *p); err != nil {
+						log.G(ctx).Warn("error copying stderr")
+					}
 					wg.Done()
 					wc.Close()
 					if rc != nil {
@@ -180,6 +186,7 @@
 			}
 		} else {
 			if sameFile != nil {
+				sameFile.count++
 				i.dest(sameFile, nil)
 				continue
 			}
@@ -187,7 +194,10 @@
 				return fmt.Errorf("containerd-shim: opening %s failed: %s", i.name, err)
 			}
 			if stdout == stderr {
-				sameFile = fw
+				sameFile = &countingWriteCloser{
+					WriteCloser: fw,
+					count:       1,
+				}
 			}
 		}
 		i.dest(fw, fr)
@@ -212,6 +222,19 @@
 	return nil
 }
 
+// countingWriteCloser masks io.Closer() until close has been invoked a certain number of times.
+type countingWriteCloser struct {
+	io.WriteCloser
+	count int64
+}
+
+func (c *countingWriteCloser) Close() error {
+	if atomic.AddInt64(&c.count, -1) > 0 {
+		return nil
+	}
+	return c.WriteCloser.Close()
+}
+
 // isFifo checks if a file is a fifo
 // if the file does not exist then it returns false
 func isFifo(path string) (bool, error) {
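
The countingWriteCloser added in the hunk above exists because, when a container's stdout and stderr point at the same file, copyPipes hands a single writer to two copy goroutines; honoring the first Close would cut off the other stream. Below is a minimal standalone sketch of the same close-counting pattern. It is not containerd code, and the temp-file demo around it is purely illustrative.

package main

import (
	"fmt"
	"io"
	"os"
	"sync/atomic"
)

// countingWriteCloser defers the real Close until it has been closed
// as many times as there are users of the shared writer.
type countingWriteCloser struct {
	io.WriteCloser
	count int64
}

func (c *countingWriteCloser) Close() error {
	if atomic.AddInt64(&c.count, -1) > 0 {
		return nil // other users still hold the writer
	}
	return c.WriteCloser.Close()
}

func main() {
	f, err := os.CreateTemp("", "shared-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Two logical streams (think stdout and stderr) share one destination.
	shared := &countingWriteCloser{WriteCloser: f, count: 2}

	fmt.Fprintln(shared, "stdout line")
	shared.Close() // first Close is swallowed; the file stays open
	fmt.Fprintln(shared, "stderr line")
	shared.Close() // second Close actually closes the file

	data, _ := os.ReadFile(f.Name())
	fmt.Print(string(data)) // both lines were written
}
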
diff --git a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
index 545e216..c408126 100644
--- a/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
+++ b/vendor/github.com/containerd/containerd/runtime/v1/linux/runtime.go
@@ -338,8 +338,12 @@
 		ctx = namespaces.WithNamespace(ctx, ns)
 		pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile))
 		s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() {
-			err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid)
+			_, err := r.tasks.Get(ctx, id)
 			if err != nil {
+				// Task was never started or was already successfully deleted
+				return
+			}
+			if err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid); err != nil {
 				log.G(ctx).WithError(err).WithField("bundle", bundle.path).
 					Error("cleaning up after dead shim")
 			}
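
The runtime.go hunk above narrows the ShimConnect callback: cleanup now runs only if the task is still registered, so a shim that exits after its task was already deleted (or was never started) no longer triggers a spurious "cleaning up after dead shim" pass. A hedged standalone sketch of that guard follows; taskStore, get and onShimExit are illustrative names, not the containerd task-store API.

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotFound = errors.New("task not found")

// taskStore stands in for the runtime's task registry.
type taskStore struct {
	mu    sync.Mutex
	tasks map[string]struct{}
}

func (s *taskStore) get(id string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.tasks[id]; !ok {
		return errNotFound
	}
	return nil
}

// onShimExit mirrors the shape of the patched callback: bail out early when
// the task is unknown, otherwise run the (possibly failing) cleanup.
func onShimExit(store *taskStore, id string, cleanup func(string) error) {
	if err := store.get(id); err != nil {
		// Task was never started or was already deleted; nothing to clean up.
		return
	}
	if err := cleanup(id); err != nil {
		fmt.Printf("cleaning up after dead shim %s: %v\n", id, err)
	}
}

func main() {
	store := &taskStore{tasks: map[string]struct{}{"live-task": {}}}
	cleanup := func(id string) error {
		fmt.Println("cleaned up", id)
		return nil
	}
	onShimExit(store, "live-task", cleanup)    // runs cleanup
	onShimExit(store, "deleted-task", cleanup) // skipped: not registered
}
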
diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/v1/shim.pb.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/v1/shim.pb.go
index 9bd2889..7cc5780 100644
--- a/vendor/github.com/containerd/containerd/runtime/v1/shim/v1/shim.pb.go
+++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/v1/shim.pb.go
@@ -1,58 +1,23 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/runtime/v1/shim/v1/shim.proto
 
-/*
-	Package shim is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/runtime/v1/shim/v1/shim.proto
-
-	It has these top-level messages:
-		CreateTaskRequest
-		CreateTaskResponse
-		DeleteResponse
-		DeleteProcessRequest
-		ExecProcessRequest
-		ExecProcessResponse
-		ResizePtyRequest
-		StateRequest
-		StateResponse
-		KillRequest
-		CloseIORequest
-		ListPidsRequest
-		ListPidsResponse
-		CheckpointTaskRequest
-		ShimInfoResponse
-		UpdateTaskRequest
-		StartRequest
-		StartResponse
-		WaitRequest
-		WaitResponse
-*/
 package shim
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/gogo/protobuf/types"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-import _ "github.com/gogo/protobuf/types"
-import containerd_types "github.com/containerd/containerd/api/types"
-import containerd_v1_types "github.com/containerd/containerd/api/types/task"
-
-import time "time"
-
-import types "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import context "context"
-import ttrpc "github.com/containerd/ttrpc"
-
-import io "io"
+import (
+	context "context"
+	fmt "fmt"
+	types "github.com/containerd/containerd/api/types"
+	task "github.com/containerd/containerd/api/types/task"
+	github_com_containerd_ttrpc "github.com/containerd/ttrpc"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	types1 "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -67,197 +32,817 @@
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type CreateTaskRequest struct {
-	ID               string                    `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Bundle           string                    `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
-	Runtime          string                    `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"`
-	Rootfs           []*containerd_types.Mount `protobuf:"bytes,4,rep,name=rootfs" json:"rootfs,omitempty"`
-	Terminal         bool                      `protobuf:"varint,5,opt,name=terminal,proto3" json:"terminal,omitempty"`
-	Stdin            string                    `protobuf:"bytes,6,opt,name=stdin,proto3" json:"stdin,omitempty"`
-	Stdout           string                    `protobuf:"bytes,7,opt,name=stdout,proto3" json:"stdout,omitempty"`
-	Stderr           string                    `protobuf:"bytes,8,opt,name=stderr,proto3" json:"stderr,omitempty"`
-	Checkpoint       string                    `protobuf:"bytes,9,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
-	ParentCheckpoint string                    `protobuf:"bytes,10,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"`
-	Options          *google_protobuf.Any      `protobuf:"bytes,11,opt,name=options" json:"options,omitempty"`
+	ID                   string         `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Bundle               string         `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
+	Runtime              string         `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"`
+	Rootfs               []*types.Mount `protobuf:"bytes,4,rep,name=rootfs,proto3" json:"rootfs,omitempty"`
+	Terminal             bool           `protobuf:"varint,5,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	Stdin                string         `protobuf:"bytes,6,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout               string         `protobuf:"bytes,7,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr               string         `protobuf:"bytes,8,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Checkpoint           string         `protobuf:"bytes,9,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	ParentCheckpoint     string         `protobuf:"bytes,10,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"`
+	Options              *types1.Any    `protobuf:"bytes,11,opt,name=options,proto3" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
 }
 
-func (m *CreateTaskRequest) Reset()                    { *m = CreateTaskRequest{} }
-func (*CreateTaskRequest) ProtoMessage()               {}
-func (*CreateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{0} }
+func (m *CreateTaskRequest) Reset()      { *m = CreateTaskRequest{} }
+func (*CreateTaskRequest) ProtoMessage() {}
+func (*CreateTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{0}
+}
+func (m *CreateTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateTaskRequest.Merge(m, src)
+}
+func (m *CreateTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateTaskRequest proto.InternalMessageInfo
 
 type CreateTaskResponse struct {
-	Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+	Pid                  uint32   `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CreateTaskResponse) Reset()                    { *m = CreateTaskResponse{} }
-func (*CreateTaskResponse) ProtoMessage()               {}
-func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{1} }
+func (m *CreateTaskResponse) Reset()      { *m = CreateTaskResponse{} }
+func (*CreateTaskResponse) ProtoMessage() {}
+func (*CreateTaskResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{1}
+}
+func (m *CreateTaskResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CreateTaskResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateTaskResponse.Merge(m, src)
+}
+func (m *CreateTaskResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateTaskResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateTaskResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateTaskResponse proto.InternalMessageInfo
 
 type DeleteResponse struct {
-	Pid        uint32    `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
-	ExitStatus uint32    `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
-	ExitedAt   time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+	Pid                  uint32    `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+	ExitStatus           uint32    `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt             time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *DeleteResponse) Reset()                    { *m = DeleteResponse{} }
-func (*DeleteResponse) ProtoMessage()               {}
-func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{2} }
+func (m *DeleteResponse) Reset()      { *m = DeleteResponse{} }
+func (*DeleteResponse) ProtoMessage() {}
+func (*DeleteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{2}
+}
+func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteResponse.Merge(m, src)
+}
+func (m *DeleteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
 
 type DeleteProcessRequest struct {
-	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *DeleteProcessRequest) Reset()                    { *m = DeleteProcessRequest{} }
-func (*DeleteProcessRequest) ProtoMessage()               {}
-func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{3} }
+func (m *DeleteProcessRequest) Reset()      { *m = DeleteProcessRequest{} }
+func (*DeleteProcessRequest) ProtoMessage() {}
+func (*DeleteProcessRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{3}
+}
+func (m *DeleteProcessRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteProcessRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteProcessRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteProcessRequest.Merge(m, src)
+}
+func (m *DeleteProcessRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteProcessRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteProcessRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteProcessRequest proto.InternalMessageInfo
 
 type ExecProcessRequest struct {
-	ID       string               `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Terminal bool                 `protobuf:"varint,2,opt,name=terminal,proto3" json:"terminal,omitempty"`
-	Stdin    string               `protobuf:"bytes,3,opt,name=stdin,proto3" json:"stdin,omitempty"`
-	Stdout   string               `protobuf:"bytes,4,opt,name=stdout,proto3" json:"stdout,omitempty"`
-	Stderr   string               `protobuf:"bytes,5,opt,name=stderr,proto3" json:"stderr,omitempty"`
-	Spec     *google_protobuf.Any `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"`
+	ID                   string      `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Terminal             bool        `protobuf:"varint,2,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	Stdin                string      `protobuf:"bytes,3,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout               string      `protobuf:"bytes,4,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr               string      `protobuf:"bytes,5,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Spec                 *types1.Any `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
-func (m *ExecProcessRequest) Reset()                    { *m = ExecProcessRequest{} }
-func (*ExecProcessRequest) ProtoMessage()               {}
-func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{4} }
+func (m *ExecProcessRequest) Reset()      { *m = ExecProcessRequest{} }
+func (*ExecProcessRequest) ProtoMessage() {}
+func (*ExecProcessRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{4}
+}
+func (m *ExecProcessRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ExecProcessRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExecProcessRequest.Merge(m, src)
+}
+func (m *ExecProcessRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExecProcessRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExecProcessRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExecProcessRequest proto.InternalMessageInfo
 
 type ExecProcessResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ExecProcessResponse) Reset()                    { *m = ExecProcessResponse{} }
-func (*ExecProcessResponse) ProtoMessage()               {}
-func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{5} }
+func (m *ExecProcessResponse) Reset()      { *m = ExecProcessResponse{} }
+func (*ExecProcessResponse) ProtoMessage() {}
+func (*ExecProcessResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{5}
+}
+func (m *ExecProcessResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ExecProcessResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExecProcessResponse.Merge(m, src)
+}
+func (m *ExecProcessResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExecProcessResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExecProcessResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExecProcessResponse proto.InternalMessageInfo
 
 type ResizePtyRequest struct {
-	ID     string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Width  uint32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"`
-	Height uint32 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Width                uint32   `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"`
+	Height               uint32   `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ResizePtyRequest) Reset()                    { *m = ResizePtyRequest{} }
-func (*ResizePtyRequest) ProtoMessage()               {}
-func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{6} }
+func (m *ResizePtyRequest) Reset()      { *m = ResizePtyRequest{} }
+func (*ResizePtyRequest) ProtoMessage() {}
+func (*ResizePtyRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{6}
+}
+func (m *ResizePtyRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResizePtyRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResizePtyRequest.Merge(m, src)
+}
+func (m *ResizePtyRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResizePtyRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResizePtyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResizePtyRequest proto.InternalMessageInfo
 
 type StateRequest struct {
-	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StateRequest) Reset()                    { *m = StateRequest{} }
-func (*StateRequest) ProtoMessage()               {}
-func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{7} }
+func (m *StateRequest) Reset()      { *m = StateRequest{} }
+func (*StateRequest) ProtoMessage() {}
+func (*StateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{7}
+}
+func (m *StateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StateRequest.Merge(m, src)
+}
+func (m *StateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *StateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StateRequest proto.InternalMessageInfo
 
 type StateResponse struct {
-	ID         string                     `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Bundle     string                     `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
-	Pid        uint32                     `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
-	Status     containerd_v1_types.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
-	Stdin      string                     `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
-	Stdout     string                     `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
-	Stderr     string                     `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
-	Terminal   bool                       `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
-	ExitStatus uint32                     `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
-	ExitedAt   time.Time                  `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+	ID                   string      `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Bundle               string      `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
+	Pid                  uint32      `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+	Status               task.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
+	Stdin                string      `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout               string      `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr               string      `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal             bool        `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	ExitStatus           uint32      `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt             time.Time   `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
-func (m *StateResponse) Reset()                    { *m = StateResponse{} }
-func (*StateResponse) ProtoMessage()               {}
-func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{8} }
+func (m *StateResponse) Reset()      { *m = StateResponse{} }
+func (*StateResponse) ProtoMessage() {}
+func (*StateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{8}
+}
+func (m *StateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StateResponse.Merge(m, src)
+}
+func (m *StateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *StateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StateResponse proto.InternalMessageInfo
 
 type KillRequest struct {
-	ID     string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Signal uint32 `protobuf:"varint,2,opt,name=signal,proto3" json:"signal,omitempty"`
-	All    bool   `protobuf:"varint,3,opt,name=all,proto3" json:"all,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Signal               uint32   `protobuf:"varint,2,opt,name=signal,proto3" json:"signal,omitempty"`
+	All                  bool     `protobuf:"varint,3,opt,name=all,proto3" json:"all,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *KillRequest) Reset()                    { *m = KillRequest{} }
-func (*KillRequest) ProtoMessage()               {}
-func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{9} }
+func (m *KillRequest) Reset()      { *m = KillRequest{} }
+func (*KillRequest) ProtoMessage() {}
+func (*KillRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{9}
+}
+func (m *KillRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *KillRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KillRequest.Merge(m, src)
+}
+func (m *KillRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *KillRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_KillRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KillRequest proto.InternalMessageInfo
 
 type CloseIORequest struct {
-	ID    string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Stdin bool   `protobuf:"varint,2,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Stdin                bool     `protobuf:"varint,2,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CloseIORequest) Reset()                    { *m = CloseIORequest{} }
-func (*CloseIORequest) ProtoMessage()               {}
-func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{10} }
+func (m *CloseIORequest) Reset()      { *m = CloseIORequest{} }
+func (*CloseIORequest) ProtoMessage() {}
+func (*CloseIORequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{10}
+}
+func (m *CloseIORequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CloseIORequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CloseIORequest.Merge(m, src)
+}
+func (m *CloseIORequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CloseIORequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CloseIORequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloseIORequest proto.InternalMessageInfo
 
 type ListPidsRequest struct {
-	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ListPidsRequest) Reset()                    { *m = ListPidsRequest{} }
-func (*ListPidsRequest) ProtoMessage()               {}
-func (*ListPidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{11} }
+func (m *ListPidsRequest) Reset()      { *m = ListPidsRequest{} }
+func (*ListPidsRequest) ProtoMessage() {}
+func (*ListPidsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{11}
+}
+func (m *ListPidsRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListPidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListPidsRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListPidsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListPidsRequest.Merge(m, src)
+}
+func (m *ListPidsRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListPidsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListPidsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListPidsRequest proto.InternalMessageInfo
 
 type ListPidsResponse struct {
-	Processes []*containerd_v1_types.ProcessInfo `protobuf:"bytes,1,rep,name=processes" json:"processes,omitempty"`
+	Processes            []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
 }
 
-func (m *ListPidsResponse) Reset()                    { *m = ListPidsResponse{} }
-func (*ListPidsResponse) ProtoMessage()               {}
-func (*ListPidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{12} }
+func (m *ListPidsResponse) Reset()      { *m = ListPidsResponse{} }
+func (*ListPidsResponse) ProtoMessage() {}
+func (*ListPidsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{12}
+}
+func (m *ListPidsResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListPidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListPidsResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListPidsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListPidsResponse.Merge(m, src)
+}
+func (m *ListPidsResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListPidsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListPidsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListPidsResponse proto.InternalMessageInfo
 
 type CheckpointTaskRequest struct {
-	Path    string               `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
-	Options *google_protobuf.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	Path                 string      `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+	Options              *types1.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
-func (m *CheckpointTaskRequest) Reset()                    { *m = CheckpointTaskRequest{} }
-func (*CheckpointTaskRequest) ProtoMessage()               {}
-func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{13} }
+func (m *CheckpointTaskRequest) Reset()      { *m = CheckpointTaskRequest{} }
+func (*CheckpointTaskRequest) ProtoMessage() {}
+func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{13}
+}
+func (m *CheckpointTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CheckpointTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CheckpointTaskRequest.Merge(m, src)
+}
+func (m *CheckpointTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CheckpointTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CheckpointTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CheckpointTaskRequest proto.InternalMessageInfo
 
 type ShimInfoResponse struct {
-	ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"`
+	ShimPid              uint32   `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ShimInfoResponse) Reset()                    { *m = ShimInfoResponse{} }
-func (*ShimInfoResponse) ProtoMessage()               {}
-func (*ShimInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{14} }
+func (m *ShimInfoResponse) Reset()      { *m = ShimInfoResponse{} }
+func (*ShimInfoResponse) ProtoMessage() {}
+func (*ShimInfoResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{14}
+}
+func (m *ShimInfoResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ShimInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ShimInfoResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ShimInfoResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ShimInfoResponse.Merge(m, src)
+}
+func (m *ShimInfoResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ShimInfoResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ShimInfoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ShimInfoResponse proto.InternalMessageInfo
 
 type UpdateTaskRequest struct {
-	Resources *google_protobuf.Any `protobuf:"bytes,1,opt,name=resources" json:"resources,omitempty"`
+	Resources            *types1.Any `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
-func (m *UpdateTaskRequest) Reset()                    { *m = UpdateTaskRequest{} }
-func (*UpdateTaskRequest) ProtoMessage()               {}
-func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{15} }
+func (m *UpdateTaskRequest) Reset()      { *m = UpdateTaskRequest{} }
+func (*UpdateTaskRequest) ProtoMessage() {}
+func (*UpdateTaskRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{15}
+}
+func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateTaskRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateTaskRequest.Merge(m, src)
+}
+func (m *UpdateTaskRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateTaskRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo
 
 type StartRequest struct {
-	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StartRequest) Reset()                    { *m = StartRequest{} }
-func (*StartRequest) ProtoMessage()               {}
-func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{16} }
+func (m *StartRequest) Reset()      { *m = StartRequest{} }
+func (*StartRequest) ProtoMessage() {}
+func (*StartRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{16}
+}
+func (m *StartRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StartRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StartRequest.Merge(m, src)
+}
+func (m *StartRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *StartRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StartRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartRequest proto.InternalMessageInfo
 
 type StartResponse struct {
-	ID  string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Pid                  uint32   `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *StartResponse) Reset()                    { *m = StartResponse{} }
-func (*StartResponse) ProtoMessage()               {}
-func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{17} }
+func (m *StartResponse) Reset()      { *m = StartResponse{} }
+func (*StartResponse) ProtoMessage() {}
+func (*StartResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{17}
+}
+func (m *StartResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StartResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StartResponse.Merge(m, src)
+}
+func (m *StartResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *StartResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StartResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartResponse proto.InternalMessageInfo
 
 type WaitRequest struct {
-	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	ID                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *WaitRequest) Reset()                    { *m = WaitRequest{} }
-func (*WaitRequest) ProtoMessage()               {}
-func (*WaitRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{18} }
+func (m *WaitRequest) Reset()      { *m = WaitRequest{} }
+func (*WaitRequest) ProtoMessage() {}
+func (*WaitRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{18}
+}
+func (m *WaitRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WaitRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WaitRequest.Merge(m, src)
+}
+func (m *WaitRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WaitRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WaitRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WaitRequest proto.InternalMessageInfo
 
 type WaitResponse struct {
-	ExitStatus uint32    `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
-	ExitedAt   time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+	ExitStatus           uint32    `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt             time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
 }
 
-func (m *WaitResponse) Reset()                    { *m = WaitResponse{} }
-func (*WaitResponse) ProtoMessage()               {}
-func (*WaitResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{19} }
+func (m *WaitResponse) Reset()      { *m = WaitResponse{} }
+func (*WaitResponse) ProtoMessage() {}
+func (*WaitResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_be1b2ef30ea3b8ef, []int{19}
+}
+func (m *WaitResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WaitResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WaitResponse.Merge(m, src)
+}
+func (m *WaitResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *WaitResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_WaitResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WaitResponse proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*CreateTaskRequest)(nil), "containerd.runtime.linux.shim.v1.CreateTaskRequest")
@@ -281,6 +866,86 @@
 	proto.RegisterType((*WaitRequest)(nil), "containerd.runtime.linux.shim.v1.WaitRequest")
 	proto.RegisterType((*WaitResponse)(nil), "containerd.runtime.linux.shim.v1.WaitResponse")
 }
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/runtime/v1/shim/v1/shim.proto", fileDescriptor_be1b2ef30ea3b8ef)
+}
+
+var fileDescriptor_be1b2ef30ea3b8ef = []byte{
+	// 1133 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x4f, 0x4f, 0x1b, 0x47,
+	0x14, 0x67, 0x17, 0xff, 0x7d, 0x8e, 0x29, 0x4c, 0x09, 0xdd, 0x38, 0x92, 0xb1, 0x56, 0x6a, 0x44,
+	0x55, 0x65, 0x5d, 0x4c, 0x95, 0xa4, 0xad, 0x84, 0x04, 0x24, 0xaa, 0x50, 0x1b, 0x05, 0x2d, 0xa4,
+	0x89, 0x5a, 0x55, 0x68, 0xf1, 0x0e, 0xf6, 0x08, 0x7b, 0x67, 0xb3, 0x33, 0x4b, 0xa1, 0xa7, 0x9e,
+	0x7a, 0xee, 0xc7, 0xe9, 0x47, 0xe0, 0x90, 0x43, 0x8f, 0x3d, 0xa5, 0x0d, 0xf7, 0x7e, 0x87, 0x6a,
+	0xfe, 0x18, 0xaf, 0x6d, 0x36, 0xbb, 0x70, 0xc1, 0xfb, 0x66, 0x7e, 0x6f, 0xe6, 0xcd, 0xfb, 0xfd,
+	0xe6, 0xbd, 0x01, 0x36, 0x7b, 0x84, 0xf7, 0xe3, 0x23, 0xa7, 0x4b, 0x87, 0xed, 0x2e, 0x0d, 0xb8,
+	0x47, 0x02, 0x1c, 0xf9, 0xc9, 0xcf, 0x28, 0x0e, 0x38, 0x19, 0xe2, 0xf6, 0xe9, 0x7a, 0x9b, 0xf5,
+	0xc9, 0x70, 0xf4, 0xeb, 0x84, 0x11, 0xe5, 0x14, 0xb5, 0xc6, 0x48, 0x47, 0x23, 0x9d, 0x01, 0x09,
+	0xe2, 0x33, 0x47, 0x82, 0x4e, 0xd7, 0x1b, 0xf7, 0x7a, 0x94, 0xf6, 0x06, 0xb8, 0x2d, 0xf1, 0x47,
+	0xf1, 0x71, 0xdb, 0x0b, 0xce, 0x95, 0x73, 0xe3, 0xfe, 0xf4, 0x14, 0x1e, 0x86, 0x7c, 0x34, 0xb9,
+	0xdc, 0xa3, 0x3d, 0x2a, 0x3f, 0xdb, 0xe2, 0x4b, 0x8f, 0xae, 0x4e, 0xbb, 0x88, 0x1d, 0x19, 0xf7,
+	0x86, 0xa1, 0x06, 0x3c, 0xca, 0x3c, 0x90, 0x17, 0x92, 0x36, 0x3f, 0x0f, 0x31, 0x6b, 0x0f, 0x69,
+	0x1c, 0x70, 0xed, 0xf7, 0xf5, 0x0d, 0xfc, 0xb8, 0xc7, 0x4e, 0xe4, 0x1f, 0xe5, 0x6b, 0xff, 0x67,
+	0xc2, 0xd2, 0x4e, 0x84, 0x3d, 0x8e, 0x0f, 0x3c, 0x76, 0xe2, 0xe2, 0x37, 0x31, 0x66, 0x1c, 0xad,
+	0x80, 0x49, 0x7c, 0xcb, 0x68, 0x19, 0x6b, 0xd5, 0xed, 0xd2, 0xe5, 0xbb, 0x55, 0x73, 0xf7, 0xa9,
+	0x6b, 0x12, 0x1f, 0xad, 0x40, 0xe9, 0x28, 0x0e, 0xfc, 0x01, 0xb6, 0x4c, 0x31, 0xe7, 0x6a, 0x0b,
+	0x59, 0x50, 0xd6, 0x19, 0xb4, 0xe6, 0xe5, 0xc4, 0xc8, 0x44, 0x6d, 0x28, 0x45, 0x94, 0xf2, 0x63,
+	0x66, 0x15, 0x5a, 0xf3, 0x6b, 0xb5, 0xce, 0x27, 0x4e, 0x22, 0xeb, 0x32, 0x24, 0xe7, 0xb9, 0x38,
+	0x8a, 0xab, 0x61, 0xa8, 0x01, 0x15, 0x8e, 0xa3, 0x21, 0x09, 0xbc, 0x81, 0x55, 0x6c, 0x19, 0x6b,
+	0x15, 0xf7, 0xca, 0x46, 0xcb, 0x50, 0x64, 0xdc, 0x27, 0x81, 0x55, 0x92, 0x9b, 0x28, 0x43, 0x04,
+	0xc5, 0xb8, 0x4f, 0x63, 0x6e, 0x95, 0x55, 0x50, 0xca, 0xd2, 0xe3, 0x38, 0x8a, 0xac, 0xca, 0xd5,
+	0x38, 0x8e, 0x22, 0xd4, 0x04, 0xe8, 0xf6, 0x71, 0xf7, 0x24, 0xa4, 0x24, 0xe0, 0x56, 0x55, 0xce,
+	0x25, 0x46, 0xd0, 0xe7, 0xb0, 0x14, 0x7a, 0x11, 0x0e, 0xf8, 0x61, 0x02, 0x06, 0x12, 0xb6, 0xa8,
+	0x26, 0x76, 0xc6, 0x60, 0x07, 0xca, 0x34, 0xe4, 0x84, 0x06, 0xcc, 0xaa, 0xb5, 0x8c, 0xb5, 0x5a,
+	0x67, 0xd9, 0x51, 0x34, 0x3b, 0x23, 0x9a, 0x9d, 0xad, 0xe0, 0xdc, 0x1d, 0x81, 0xec, 0x07, 0x80,
+	0x92, 0xe9, 0x66, 0x21, 0x0d, 0x18, 0x46, 0x8b, 0x30, 0x1f, 0xea, 0x84, 0xd7, 0x5d, 0xf1, 0x69,
+	0xff, 0x6e, 0xc0, 0xc2, 0x53, 0x3c, 0xc0, 0x1c, 0xa7, 0x83, 0xd0, 0x2a, 0xd4, 0xf0, 0x19, 0xe1,
+	0x87, 0x8c, 0x7b, 0x3c, 0x66, 0x92, 0x93, 0xba, 0x0b, 0x62, 0x68, 0x5f, 0x8e, 0xa0, 0x2d, 0xa8,
+	0x0a, 0x0b, 0xfb, 0x87, 0x1e, 0x97, 0xcc, 0xd4, 0x3a, 0x8d, 0x99, 0xf8, 0x0e, 0x46, 0x32, 0xdc,
+	0xae, 0x5c, 0xbc, 0x5b, 0x9d, 0xfb, 0xe3, 0x9f, 0x55, 0xc3, 0xad, 0x28, 0xb7, 0x2d, 0x6e, 0x3b,
+	0xb0, 0xac, 0xe2, 0xd8, 0x8b, 0x68, 0x17, 0x33, 0x96, 0x21, 0x11, 0xfb, 0x4f, 0x03, 0xd0, 0xb3,
+	0x33, 0xdc, 0xcd, 0x07, 0x9f, 0xa0, 0xdb, 0x4c, 0xa3, 0x7b, 0xfe, 0x7a, 0xba, 0x0b, 0x29, 0x74,
+	0x17, 0x27, 0xe8, 0x5e, 0x83, 0x02, 0x0b, 0x71, 0x57, 0x6a, 0x26, 0x8d, 0x1e, 0x89, 0xb0, 0xef,
+	0xc2, 0xc7, 0x13, 0x91, 0xab, 0xbc, 0xdb, 0xaf, 0x61, 0xd1, 0xc5, 0x8c, 0xfc, 0x8a, 0xf7, 0xf8,
+	0x79, 0xd6, 0x71, 0x96, 0xa1, 0xf8, 0x0b, 0xf1, 0x79, 0x5f, 0x73, 0xa1, 0x0c, 0x11, 0x5a, 0x1f,
+	0x93, 0x5e, 0x5f, 0x71, 0x50, 0x77, 0xb5, 0x65, 0x3f, 0x80, 0x3b, 0x82, 0x28, 0x9c, 0x95, 0xd3,
+	0xb7, 0x26, 0xd4, 0x35, 0x50, 0x6b, 0xe1, 0xa6, 0x17, 0x54, 0x6b, 0x67, 0x7e, 0xac, 0x9d, 0x0d,
+	0x91, 0x2e, 0x29, 0x1b, 0x91, 0xc6, 0x85, 0xce, 0xfd, 0xe4, 0xc5, 0x3c, 0x5d, 0xd7, 0x77, 0x53,
+	0xe9, 0xc8, 0xd5, 0xd0, 0x31, 0x23, 0xc5, 0xeb, 0x19, 0x29, 0xa5, 0x30, 0x52, 0x9e, 0x60, 0x24,
+	0xc9, 0x79, 0x65, 0x8a, 0xf3, 0x29, 0x49, 0x57, 0x3f, 0x2c, 0x69, 0xb8, 0x95, 0xa4, 0x5f, 0x40,
+	0xed, 0x3b, 0x32, 0x18, 0xe4, 0x28, 0x76, 0x8c, 0xf4, 0x46, 0xc2, 0xac, 0xbb, 0xda, 0x12, 0xb9,
+	0xf4, 0x06, 0x03, 0x99, 0xcb, 0x8a, 0x2b, 0x3e, 0xed, 0x4d, 0x58, 0xd8, 0x19, 0x50, 0x86, 0x77,
+	0x5f, 0xe4, 0xd0, 0x87, 0x4a, 0xa0, 0xd2, 0xba, 0x32, 0xec, 0xcf, 0xe0, 0xa3, 0xef, 0x09, 0xe3,
+	0x7b, 0xc4, 0xcf, 0xbc, 0x5e, 0x2e, 0x2c, 0x8e, 0xa1, 0x5a, 0x0c, 0x9b, 0x50, 0x0d, 0x95, 0x66,
+	0x31, 0xb3, 0x0c, 0x59, 0x66, 0x5b, 0xd7, 0xb2, 0xa9, 0x95, 0xbd, 0x1b, 0x1c, 0x53, 0x77, 0xec,
+	0x62, 0xff, 0x04, 0x77, 0xc7, 0x15, 0x2d, 0xd9, 0x06, 0x10, 0x14, 0x42, 0x8f, 0xf7, 0x55, 0x18,
+	0xae, 0xfc, 0x4e, 0x16, 0x3c, 0x33, 0x4f, 0xc1, 0x7b, 0x08, 0x8b, 0xfb, 0x7d, 0x32, 0x94, 0x7b,
+	0x8e, 0x02, 0xbe, 0x07, 0x15, 0xd1, 0x62, 0x0f, 0xc7, 0xe5, 0xac, 0x2c, 0xec, 0x3d, 0xe2, 0xdb,
+	0xdf, 0xc2, 0xd2, 0xcb, 0xd0, 0x9f, 0x6a, 0x47, 0x1d, 0xa8, 0x46, 0x98, 0xd1, 0x38, 0xea, 0xca,
+	0x03, 0xa6, 0xef, 0x3a, 0x86, 0xe9, 0xbb, 0x15, 0xf1, 0xac, 0x84, 0x7e, 0x25, 0xaf, 0x96, 0xc0,
+	0x65, 0x5c, 0x2d, 0x7d, 0x85, 0xcc, 0x71, 0x8d, 0xfe, 0x14, 0x6a, 0xaf, 0x3c, 0x92, 0xb9, 0x43,
+	0x04, 0x77, 0x14, 0x4c, 0x6f, 0x30, 0x25, 0x71, 0xe3, 0xc3, 0x12, 0x37, 0x6f, 0x23, 0xf1, 0xce,
+	0xdb, 0x1a, 0x14, 0x44, 0xda, 0x51, 0x1f, 0x8a, 0xb2, 0x72, 0x20, 0xc7, 0xc9, 0x7a, 0xee, 0x38,
+	0xc9, 0x5a, 0xd4, 0x68, 0xe7, 0xc6, 0xeb, 0x63, 0x31, 0x28, 0xa9, 0xce, 0x86, 0x36, 0xb2, 0x5d,
+	0x67, 0x9e, 0x1c, 0x8d, 0x2f, 0x6f, 0xe6, 0xa4, 0x37, 0x55, 0xc7, 0x8b, 0x78, 0xce, 0xe3, 0x5d,
+	0xc9, 0x21, 0xe7, 0xf1, 0x12, 0xb2, 0x70, 0xa1, 0xa4, 0xfa, 0x20, 0x5a, 0x99, 0xe1, 0xe2, 0x99,
+	0x78, 0xfb, 0x35, 0xbe, 0xc8, 0x5e, 0x72, 0xaa, 0xa3, 0x9f, 0x43, 0x7d, 0xa2, 0xb7, 0xa2, 0x47,
+	0x79, 0x97, 0x98, 0xec, 0xae, 0xb7, 0xd8, 0xfa, 0x0d, 0x54, 0x46, 0x75, 0x04, 0xad, 0x67, 0x7b,
+	0x4f, 0x95, 0xa7, 0x46, 0xe7, 0x26, 0x2e, 0x7a, 0xcb, 0xc7, 0x50, 0xdc, 0xf3, 0x62, 0x96, 0x9e,
+	0xc0, 0x94, 0x71, 0xf4, 0x04, 0x4a, 0x2e, 0x66, 0xf1, 0xf0, 0xe6, 0x9e, 0x3f, 0x03, 0x24, 0xde,
+	0x6a, 0x8f, 0x73, 0x48, 0xec, 0xba, 0x3a, 0x98, 0xba, 0xfc, 0x73, 0x28, 0x88, 0x46, 0x82, 0x1e,
+	0x66, 0x2f, 0x9c, 0x68, 0x38, 0xa9, 0xcb, 0x1d, 0x40, 0x41, 0xbc, 0x3f, 0x50, 0x8e, 0xab, 0x30,
+	0xfb, 0xc2, 0x4a, 0x5d, 0xf5, 0x15, 0x54, 0xaf, 0x9e, 0x2f, 0x28, 0x07, 0x6f, 0xd3, 0x6f, 0x9d,
+	0xd4, 0x85, 0xf7, 0xa1, 0xac, 0xbb, 0x1e, 0xca, 0xa1, 0xbf, 0xc9, 0x06, 0x99, 0xba, 0xe8, 0x0f,
+	0x50, 0x19, 0xb5, 0x8b, 0x54, 0xb6, 0x73, 0x1c, 0x62, 0xa6, 0xe5, 0xbc, 0x84, 0x92, 0xea, 0x2b,
+	0x79, 0xaa, 0xd3, 0x4c, 0x07, 0x4a, 0x0d, 0x17, 0x43, 0x41, 0xd4, 0xf6, 0x3c, 0x0a, 0x48, 0xb4,
+	0x8a, 0x86, 0x93, 0x17, 0xae, 0xa2, 0xdf, 0x76, 0x2f, 0xde, 0x37, 0xe7, 0xfe, 0x7e, 0xdf, 0x9c,
+	0xfb, 0xed, 0xb2, 0x69, 0x5c, 0x5c, 0x36, 0x8d, 0xbf, 0x2e, 0x9b, 0xc6, 0xbf, 0x97, 0x4d, 0xe3,
+	0xc7, 0x27, 0xb7, 0xf8, 0x27, 0xf8, 0x1b, 0xf1, 0xfb, 0xda, 0x3c, 0x2a, 0xc9, 0xc3, 0x6c, 0xfc,
+	0x1f, 0x00, 0x00, 0xff, 0xff, 0x64, 0x52, 0x86, 0xc0, 0x49, 0x0f, 0x00, 0x00,
+}
+
 func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -376,6 +1041,9 @@
 		}
 		i += n1
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -399,6 +1067,9 @@
 		i++
 		i = encodeVarintShim(dAtA, i, uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -429,12 +1100,15 @@
 	}
 	dAtA[i] = 0x1a
 	i++
-	i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
-	n2, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n2
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -459,6 +1133,9 @@
 		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -521,6 +1198,9 @@
 		}
 		i += n3
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -539,6 +1219,9 @@
 	_ = i
 	var l int
 	_ = l
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -573,6 +1256,9 @@
 		i++
 		i = encodeVarintShim(dAtA, i, uint64(m.Height))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -597,6 +1283,9 @@
 		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -672,12 +1361,15 @@
 	}
 	dAtA[i] = 0x52
 	i++
-	i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
-	n4, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n4
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -717,6 +1409,9 @@
 		}
 		i++
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -751,6 +1446,9 @@
 		}
 		i++
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -775,6 +1473,9 @@
 		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -805,6 +1506,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -839,6 +1543,9 @@
 		}
 		i += n5
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -862,6 +1569,9 @@
 		i++
 		i = encodeVarintShim(dAtA, i, uint64(m.ShimPid))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -890,6 +1600,9 @@
 		}
 		i += n6
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -914,6 +1627,9 @@
 		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -943,6 +1659,9 @@
 		i++
 		i = encodeVarintShim(dAtA, i, uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -967,6 +1686,9 @@
 		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
 		i += copy(dAtA[i:], m.ID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -992,12 +1714,15 @@
 	}
 	dAtA[i] = 0x12
 	i++
-	i = encodeVarintShim(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
-	n7, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
 	if err != nil {
 		return 0, err
 	}
 	i += n7
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1011,6 +1736,9 @@
 	return offset + 1
 }
 func (m *CreateTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -1058,19 +1786,31 @@
 		l = m.Options.Size()
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CreateTaskResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Pid != 0 {
 		n += 1 + sovShim(uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Pid != 0 {
@@ -1079,22 +1819,34 @@
 	if m.ExitStatus != 0 {
 		n += 1 + sovShim(uint64(m.ExitStatus))
 	}
-	l = types.SizeOfStdTime(m.ExitedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovShim(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DeleteProcessRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ExecProcessRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -1120,16 +1872,28 @@
 		l = m.Spec.Size()
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ExecProcessResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ResizePtyRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -1142,20 +1906,32 @@
 	if m.Height != 0 {
 		n += 1 + sovShim(uint64(m.Height))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -1190,12 +1966,18 @@
 	if m.ExitStatus != 0 {
 		n += 1 + sovShim(uint64(m.ExitStatus))
 	}
-	l = types.SizeOfStdTime(m.ExitedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovShim(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *KillRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -1208,10 +1990,16 @@
 	if m.All {
 		n += 2
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CloseIORequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -1221,20 +2009,32 @@
 	if m.Stdin {
 		n += 2
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListPidsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ListPidsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Processes) > 0 {
@@ -1243,10 +2043,16 @@
 			n += 1 + l + sovShim(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CheckpointTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Path)
@@ -1257,39 +2063,63 @@
 		l = m.Options.Size()
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ShimInfoResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.ShimPid != 0 {
 		n += 1 + sovShim(uint64(m.ShimPid))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *UpdateTaskRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Resources != nil {
 		l = m.Resources.Size()
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StartRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *StartResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
@@ -1299,27 +2129,42 @@
 	if m.Pid != 0 {
 		n += 1 + sovShim(uint64(m.Pid))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *WaitRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ID)
 	if l > 0 {
 		n += 1 + l + sovShim(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *WaitResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.ExitStatus != 0 {
 		n += 1 + sovShim(uint64(m.ExitStatus))
 	}
-	l = types.SizeOfStdTime(m.ExitedAt)
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
 	n += 1 + l + sovShim(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -1344,14 +2189,15 @@
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`,
 		`Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
-		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "types.Mount", 1) + `,`,
 		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
 		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
 		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
 		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
 		`Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`,
 		`ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`,
-		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1362,6 +2208,7 @@
 	}
 	s := strings.Join([]string{`&CreateTaskResponse{`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1373,7 +2220,8 @@
 	s := strings.Join([]string{`&DeleteResponse{`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1384,6 +2232,7 @@
 	}
 	s := strings.Join([]string{`&DeleteProcessRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1398,7 +2247,8 @@
 		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
 		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
 		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
-		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf.Any", 1) + `,`,
+		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types1.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1408,6 +2258,7 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&ExecProcessResponse{`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1420,6 +2271,7 @@
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Width:` + fmt.Sprintf("%v", this.Width) + `,`,
 		`Height:` + fmt.Sprintf("%v", this.Height) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1430,6 +2282,7 @@
 	}
 	s := strings.Join([]string{`&StateRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1448,7 +2301,8 @@
 		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
 		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1461,6 +2315,7 @@
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Signal:` + fmt.Sprintf("%v", this.Signal) + `,`,
 		`All:` + fmt.Sprintf("%v", this.All) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1472,6 +2327,7 @@
 	s := strings.Join([]string{`&CloseIORequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1482,6 +2338,7 @@
 	}
 	s := strings.Join([]string{`&ListPidsRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1491,7 +2348,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&ListPidsResponse{`,
-		`Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "containerd_v1_types.ProcessInfo", 1) + `,`,
+		`Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "task.ProcessInfo", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1502,7 +2360,8 @@
 	}
 	s := strings.Join([]string{`&CheckpointTaskRequest{`,
 		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
-		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1513,6 +2372,7 @@
 	}
 	s := strings.Join([]string{`&ShimInfoResponse{`,
 		`ShimPid:` + fmt.Sprintf("%v", this.ShimPid) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1522,7 +2382,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&UpdateTaskRequest{`,
-		`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "google_protobuf.Any", 1) + `,`,
+		`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "types1.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1533,6 +2394,7 @@
 	}
 	s := strings.Join([]string{`&StartRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1544,6 +2406,7 @@
 	s := strings.Join([]string{`&StartResponse{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1554,6 +2417,7 @@
 	}
 	s := strings.Join([]string{`&WaitRequest{`,
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1564,7 +2428,8 @@
 	}
 	s := strings.Join([]string{`&WaitResponse{`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1582,23 +2447,23 @@
 	State(ctx context.Context, req *StateRequest) (*StateResponse, error)
 	Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error)
 	Start(ctx context.Context, req *StartRequest) (*StartResponse, error)
-	Delete(ctx context.Context, req *google_protobuf1.Empty) (*DeleteResponse, error)
+	Delete(ctx context.Context, req *types1.Empty) (*DeleteResponse, error)
 	DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error)
 	ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error)
-	Pause(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error)
-	Resume(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error)
-	Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error)
-	Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error)
-	Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error)
-	ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error)
-	CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error)
-	ShimInfo(ctx context.Context, req *google_protobuf1.Empty) (*ShimInfoResponse, error)
-	Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error)
+	Pause(ctx context.Context, req *types1.Empty) (*types1.Empty, error)
+	Resume(ctx context.Context, req *types1.Empty) (*types1.Empty, error)
+	Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*types1.Empty, error)
+	Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error)
+	Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error)
+	ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error)
+	CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error)
+	ShimInfo(ctx context.Context, req *types1.Empty) (*ShimInfoResponse, error)
+	Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error)
 	Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error)
 }
 
-func RegisterShimService(srv *ttrpc.Server, svc ShimService) {
-	srv.Register("containerd.runtime.linux.shim.v1.Shim", map[string]ttrpc.Method{
+func RegisterShimService(srv *github_com_containerd_ttrpc.Server, svc ShimService) {
+	srv.Register("containerd.runtime.linux.shim.v1.Shim", map[string]github_com_containerd_ttrpc.Method{
 		"State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
 			var req StateRequest
 			if err := unmarshal(&req); err != nil {
@@ -1621,7 +2486,7 @@
 			return svc.Start(ctx, &req)
 		},
 		"Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
-			var req google_protobuf1.Empty
+			var req types1.Empty
 			if err := unmarshal(&req); err != nil {
 				return nil, err
 			}
@@ -1642,14 +2507,14 @@
 			return svc.ListPids(ctx, &req)
 		},
 		"Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
-			var req google_protobuf1.Empty
+			var req types1.Empty
 			if err := unmarshal(&req); err != nil {
 				return nil, err
 			}
 			return svc.Pause(ctx, &req)
 		},
 		"Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
-			var req google_protobuf1.Empty
+			var req types1.Empty
 			if err := unmarshal(&req); err != nil {
 				return nil, err
 			}
@@ -1691,7 +2556,7 @@
 			return svc.CloseIO(ctx, &req)
 		},
 		"ShimInfo": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
-			var req google_protobuf1.Empty
+			var req types1.Empty
 			if err := unmarshal(&req); err != nil {
 				return nil, err
 			}
@@ -1715,10 +2580,10 @@
 }
 
 type shimClient struct {
-	client *ttrpc.Client
+	client *github_com_containerd_ttrpc.Client
 }
 
-func NewShimClient(client *ttrpc.Client) ShimService {
+func NewShimClient(client *github_com_containerd_ttrpc.Client) ShimService {
 	return &shimClient{
 		client: client,
 	}
@@ -1748,7 +2613,7 @@
 	return &resp, nil
 }
 
-func (c *shimClient) Delete(ctx context.Context, req *google_protobuf1.Empty) (*DeleteResponse, error) {
+func (c *shimClient) Delete(ctx context.Context, req *types1.Empty) (*DeleteResponse, error) {
 	var resp DeleteResponse
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Delete", req, &resp); err != nil {
 		return nil, err
@@ -1772,63 +2637,63 @@
 	return &resp, nil
 }
 
-func (c *shimClient) Pause(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *shimClient) Pause(ctx context.Context, req *types1.Empty) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Pause", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
 
-func (c *shimClient) Resume(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *shimClient) Resume(ctx context.Context, req *types1.Empty) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Resume", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
 
-func (c *shimClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *shimClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Checkpoint", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
 
-func (c *shimClient) Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *shimClient) Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Kill", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
 
-func (c *shimClient) Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *shimClient) Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Exec", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
 
-func (c *shimClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *shimClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ResizePty", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
 
-func (c *shimClient) CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *shimClient) CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "CloseIO", req, &resp); err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
 
-func (c *shimClient) ShimInfo(ctx context.Context, req *google_protobuf1.Empty) (*ShimInfoResponse, error) {
+func (c *shimClient) ShimInfo(ctx context.Context, req *types1.Empty) (*ShimInfoResponse, error) {
 	var resp ShimInfoResponse
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ShimInfo", req, &resp); err != nil {
 		return nil, err
@@ -1836,8 +2701,8 @@
 	return &resp, nil
 }
 
-func (c *shimClient) Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) {
-	var resp google_protobuf1.Empty
+func (c *shimClient) Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) {
+	var resp types1.Empty
 	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Update", req, &resp); err != nil {
 		return nil, err
 	}
@@ -1866,7 +2731,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1894,7 +2759,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1904,6 +2769,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1923,7 +2791,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1933,6 +2801,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1952,7 +2823,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1962,6 +2833,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1981,7 +2855,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1990,10 +2864,13 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Rootfs = append(m.Rootfs, &containerd_types.Mount{})
+			m.Rootfs = append(m.Rootfs, &types.Mount{})
 			if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -2012,7 +2889,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2032,7 +2909,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2042,6 +2919,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2061,7 +2941,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2071,6 +2951,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2090,7 +2973,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2100,6 +2983,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2119,7 +3005,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2129,6 +3015,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2148,7 +3037,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2158,6 +3047,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2177,7 +3069,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2186,11 +3078,14 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Options == nil {
-				m.Options = &google_protobuf.Any{}
+				m.Options = &types1.Any{}
 			}
 			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -2205,9 +3100,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2232,7 +3131,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2260,7 +3159,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2274,9 +3173,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2301,7 +3204,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2329,7 +3232,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2348,7 +3251,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				m.ExitStatus |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2367,7 +3270,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2376,10 +3279,13 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -2392,9 +3298,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2419,7 +3329,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2447,7 +3357,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2457,6 +3367,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2471,9 +3384,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2498,7 +3415,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2526,7 +3443,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2536,6 +3453,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2555,7 +3475,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2575,7 +3495,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2585,6 +3505,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2604,7 +3527,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2614,6 +3537,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2633,7 +3559,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2643,6 +3569,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2662,7 +3591,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2671,11 +3600,14 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Spec == nil {
-				m.Spec = &google_protobuf.Any{}
+				m.Spec = &types1.Any{}
 			}
 			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -2690,9 +3622,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2717,7 +3653,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2740,9 +3676,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2767,7 +3707,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2795,7 +3735,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2805,6 +3745,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2824,7 +3767,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Width |= (uint32(b) & 0x7F) << shift
+				m.Width |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2843,7 +3786,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Height |= (uint32(b) & 0x7F) << shift
+				m.Height |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2857,9 +3800,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2884,7 +3831,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2912,7 +3859,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2922,6 +3869,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2936,9 +3886,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2963,7 +3917,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2991,7 +3945,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3001,6 +3955,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3020,7 +3977,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3030,6 +3987,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3049,7 +4009,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3068,7 +4028,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Status |= (containerd_v1_types.Status(b) & 0x7F) << shift
+				m.Status |= task.Status(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3087,7 +4047,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3097,6 +4057,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3116,7 +4079,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3126,6 +4089,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3145,7 +4111,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3155,6 +4121,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3174,7 +4143,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3194,7 +4163,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				m.ExitStatus |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3213,7 +4182,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3222,10 +4191,13 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -3238,9 +4210,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3265,7 +4241,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3293,7 +4269,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3303,6 +4279,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3322,7 +4301,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Signal |= (uint32(b) & 0x7F) << shift
+				m.Signal |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3341,7 +4320,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3356,9 +4335,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3383,7 +4366,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3411,7 +4394,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3421,6 +4404,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3440,7 +4426,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3455,9 +4441,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3482,7 +4472,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3510,7 +4500,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3520,6 +4510,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3534,9 +4527,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3561,7 +4558,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3589,7 +4586,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3598,10 +4595,13 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Processes = append(m.Processes, &containerd_v1_types.ProcessInfo{})
+			m.Processes = append(m.Processes, &task.ProcessInfo{})
 			if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -3615,9 +4615,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3642,7 +4646,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3670,7 +4674,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3680,6 +4684,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3699,7 +4706,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3708,11 +4715,14 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Options == nil {
-				m.Options = &google_protobuf.Any{}
+				m.Options = &types1.Any{}
 			}
 			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -3727,9 +4737,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3754,7 +4768,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3782,7 +4796,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ShimPid |= (uint32(b) & 0x7F) << shift
+				m.ShimPid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3796,9 +4810,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3823,7 +4841,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3851,7 +4869,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3860,11 +4878,14 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.Resources == nil {
-				m.Resources = &google_protobuf.Any{}
+				m.Resources = &types1.Any{}
 			}
 			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -3879,9 +4900,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3906,7 +4931,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3934,7 +4959,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3944,6 +4969,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3958,9 +4986,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3985,7 +5017,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4013,7 +5045,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4023,6 +5055,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4042,7 +5077,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Pid |= (uint32(b) & 0x7F) << shift
+				m.Pid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4056,9 +5091,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4083,7 +5122,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4111,7 +5150,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4121,6 +5160,9 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4135,9 +5177,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4162,7 +5208,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4190,7 +5236,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				m.ExitStatus |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4209,7 +5255,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4218,10 +5264,13 @@
 				return ErrInvalidLengthShim
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthShim
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -4234,9 +5283,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthShim
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthShim
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -4300,10 +5353,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthShim
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthShim
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -4332,6 +5388,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthShim
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -4350,82 +5409,3 @@
 	ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowShim   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/runtime/v1/shim/v1/shim.proto", fileDescriptorShim)
-}
-
-var fileDescriptorShim = []byte{
-	// 1133 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x4f, 0x4f, 0x1b, 0x47,
-	0x14, 0x67, 0x17, 0xff, 0x7d, 0x8e, 0x29, 0x4c, 0x09, 0xdd, 0x38, 0x92, 0xb1, 0x56, 0x6a, 0x44,
-	0x55, 0x65, 0x5d, 0x4c, 0x95, 0xa4, 0xad, 0x84, 0x04, 0x24, 0xaa, 0x50, 0x1b, 0x05, 0x2d, 0xa4,
-	0x89, 0x5a, 0x55, 0x68, 0xf1, 0x0e, 0xf6, 0x08, 0x7b, 0x67, 0xb3, 0x33, 0x4b, 0xa1, 0xa7, 0x9e,
-	0x7a, 0xee, 0xc7, 0xe9, 0x47, 0xe0, 0x90, 0x43, 0x8f, 0x3d, 0xa5, 0x0d, 0xf7, 0x7e, 0x87, 0x6a,
-	0xfe, 0x18, 0xaf, 0x6d, 0x36, 0xbb, 0x70, 0xc1, 0xfb, 0x66, 0x7e, 0x6f, 0xe6, 0xcd, 0xfb, 0xfd,
-	0xe6, 0xbd, 0x01, 0x36, 0x7b, 0x84, 0xf7, 0xe3, 0x23, 0xa7, 0x4b, 0x87, 0xed, 0x2e, 0x0d, 0xb8,
-	0x47, 0x02, 0x1c, 0xf9, 0xc9, 0xcf, 0x28, 0x0e, 0x38, 0x19, 0xe2, 0xf6, 0xe9, 0x7a, 0x9b, 0xf5,
-	0xc9, 0x70, 0xf4, 0xeb, 0x84, 0x11, 0xe5, 0x14, 0xb5, 0xc6, 0x48, 0x47, 0x23, 0x9d, 0x01, 0x09,
-	0xe2, 0x33, 0x47, 0x82, 0x4e, 0xd7, 0x1b, 0xf7, 0x7a, 0x94, 0xf6, 0x06, 0xb8, 0x2d, 0xf1, 0x47,
-	0xf1, 0x71, 0xdb, 0x0b, 0xce, 0x95, 0x73, 0xe3, 0xfe, 0xf4, 0x14, 0x1e, 0x86, 0x7c, 0x34, 0xb9,
-	0xdc, 0xa3, 0x3d, 0x2a, 0x3f, 0xdb, 0xe2, 0x4b, 0x8f, 0xae, 0x4e, 0xbb, 0x88, 0x1d, 0x19, 0xf7,
-	0x86, 0xa1, 0x06, 0x3c, 0xca, 0x3c, 0x90, 0x17, 0x92, 0x36, 0x3f, 0x0f, 0x31, 0x6b, 0x0f, 0x69,
-	0x1c, 0x70, 0xed, 0xf7, 0xf5, 0x0d, 0xfc, 0xb8, 0xc7, 0x4e, 0xe4, 0x1f, 0xe5, 0x6b, 0xff, 0x67,
-	0xc2, 0xd2, 0x4e, 0x84, 0x3d, 0x8e, 0x0f, 0x3c, 0x76, 0xe2, 0xe2, 0x37, 0x31, 0x66, 0x1c, 0xad,
-	0x80, 0x49, 0x7c, 0xcb, 0x68, 0x19, 0x6b, 0xd5, 0xed, 0xd2, 0xe5, 0xbb, 0x55, 0x73, 0xf7, 0xa9,
-	0x6b, 0x12, 0x1f, 0xad, 0x40, 0xe9, 0x28, 0x0e, 0xfc, 0x01, 0xb6, 0x4c, 0x31, 0xe7, 0x6a, 0x0b,
-	0x59, 0x50, 0xd6, 0x19, 0xb4, 0xe6, 0xe5, 0xc4, 0xc8, 0x44, 0x6d, 0x28, 0x45, 0x94, 0xf2, 0x63,
-	0x66, 0x15, 0x5a, 0xf3, 0x6b, 0xb5, 0xce, 0x27, 0x4e, 0x22, 0xeb, 0x32, 0x24, 0xe7, 0xb9, 0x38,
-	0x8a, 0xab, 0x61, 0xa8, 0x01, 0x15, 0x8e, 0xa3, 0x21, 0x09, 0xbc, 0x81, 0x55, 0x6c, 0x19, 0x6b,
-	0x15, 0xf7, 0xca, 0x46, 0xcb, 0x50, 0x64, 0xdc, 0x27, 0x81, 0x55, 0x92, 0x9b, 0x28, 0x43, 0x04,
-	0xc5, 0xb8, 0x4f, 0x63, 0x6e, 0x95, 0x55, 0x50, 0xca, 0xd2, 0xe3, 0x38, 0x8a, 0xac, 0xca, 0xd5,
-	0x38, 0x8e, 0x22, 0xd4, 0x04, 0xe8, 0xf6, 0x71, 0xf7, 0x24, 0xa4, 0x24, 0xe0, 0x56, 0x55, 0xce,
-	0x25, 0x46, 0xd0, 0xe7, 0xb0, 0x14, 0x7a, 0x11, 0x0e, 0xf8, 0x61, 0x02, 0x06, 0x12, 0xb6, 0xa8,
-	0x26, 0x76, 0xc6, 0x60, 0x07, 0xca, 0x34, 0xe4, 0x84, 0x06, 0xcc, 0xaa, 0xb5, 0x8c, 0xb5, 0x5a,
-	0x67, 0xd9, 0x51, 0x34, 0x3b, 0x23, 0x9a, 0x9d, 0xad, 0xe0, 0xdc, 0x1d, 0x81, 0xec, 0x07, 0x80,
-	0x92, 0xe9, 0x66, 0x21, 0x0d, 0x18, 0x46, 0x8b, 0x30, 0x1f, 0xea, 0x84, 0xd7, 0x5d, 0xf1, 0x69,
-	0xff, 0x6e, 0xc0, 0xc2, 0x53, 0x3c, 0xc0, 0x1c, 0xa7, 0x83, 0xd0, 0x2a, 0xd4, 0xf0, 0x19, 0xe1,
-	0x87, 0x8c, 0x7b, 0x3c, 0x66, 0x92, 0x93, 0xba, 0x0b, 0x62, 0x68, 0x5f, 0x8e, 0xa0, 0x2d, 0xa8,
-	0x0a, 0x0b, 0xfb, 0x87, 0x1e, 0x97, 0xcc, 0xd4, 0x3a, 0x8d, 0x99, 0xf8, 0x0e, 0x46, 0x32, 0xdc,
-	0xae, 0x5c, 0xbc, 0x5b, 0x9d, 0xfb, 0xe3, 0x9f, 0x55, 0xc3, 0xad, 0x28, 0xb7, 0x2d, 0x6e, 0x3b,
-	0xb0, 0xac, 0xe2, 0xd8, 0x8b, 0x68, 0x17, 0x33, 0x96, 0x21, 0x11, 0xfb, 0x4f, 0x03, 0xd0, 0xb3,
-	0x33, 0xdc, 0xcd, 0x07, 0x9f, 0xa0, 0xdb, 0x4c, 0xa3, 0x7b, 0xfe, 0x7a, 0xba, 0x0b, 0x29, 0x74,
-	0x17, 0x27, 0xe8, 0x5e, 0x83, 0x02, 0x0b, 0x71, 0x57, 0x6a, 0x26, 0x8d, 0x1e, 0x89, 0xb0, 0xef,
-	0xc2, 0xc7, 0x13, 0x91, 0xab, 0xbc, 0xdb, 0xaf, 0x61, 0xd1, 0xc5, 0x8c, 0xfc, 0x8a, 0xf7, 0xf8,
-	0x79, 0xd6, 0x71, 0x96, 0xa1, 0xf8, 0x0b, 0xf1, 0x79, 0x5f, 0x73, 0xa1, 0x0c, 0x11, 0x5a, 0x1f,
-	0x93, 0x5e, 0x5f, 0x71, 0x50, 0x77, 0xb5, 0x65, 0x3f, 0x80, 0x3b, 0x82, 0x28, 0x9c, 0x95, 0xd3,
-	0xb7, 0x26, 0xd4, 0x35, 0x50, 0x6b, 0xe1, 0xa6, 0x17, 0x54, 0x6b, 0x67, 0x7e, 0xac, 0x9d, 0x0d,
-	0x91, 0x2e, 0x29, 0x1b, 0x91, 0xc6, 0x85, 0xce, 0xfd, 0xe4, 0xc5, 0x3c, 0x5d, 0xd7, 0x77, 0x53,
-	0xe9, 0xc8, 0xd5, 0xd0, 0x31, 0x23, 0xc5, 0xeb, 0x19, 0x29, 0xa5, 0x30, 0x52, 0x9e, 0x60, 0x24,
-	0xc9, 0x79, 0x65, 0x8a, 0xf3, 0x29, 0x49, 0x57, 0x3f, 0x2c, 0x69, 0xb8, 0x95, 0xa4, 0x5f, 0x40,
-	0xed, 0x3b, 0x32, 0x18, 0xe4, 0x28, 0x76, 0x8c, 0xf4, 0x46, 0xc2, 0xac, 0xbb, 0xda, 0x12, 0xb9,
-	0xf4, 0x06, 0x03, 0x99, 0xcb, 0x8a, 0x2b, 0x3e, 0xed, 0x4d, 0x58, 0xd8, 0x19, 0x50, 0x86, 0x77,
-	0x5f, 0xe4, 0xd0, 0x87, 0x4a, 0xa0, 0xd2, 0xba, 0x32, 0xec, 0xcf, 0xe0, 0xa3, 0xef, 0x09, 0xe3,
-	0x7b, 0xc4, 0xcf, 0xbc, 0x5e, 0x2e, 0x2c, 0x8e, 0xa1, 0x5a, 0x0c, 0x9b, 0x50, 0x0d, 0x95, 0x66,
-	0x31, 0xb3, 0x0c, 0x59, 0x66, 0x5b, 0xd7, 0xb2, 0xa9, 0x95, 0xbd, 0x1b, 0x1c, 0x53, 0x77, 0xec,
-	0x62, 0xff, 0x04, 0x77, 0xc7, 0x15, 0x2d, 0xd9, 0x06, 0x10, 0x14, 0x42, 0x8f, 0xf7, 0x55, 0x18,
-	0xae, 0xfc, 0x4e, 0x16, 0x3c, 0x33, 0x4f, 0xc1, 0x7b, 0x08, 0x8b, 0xfb, 0x7d, 0x32, 0x94, 0x7b,
-	0x8e, 0x02, 0xbe, 0x07, 0x15, 0xd1, 0x62, 0x0f, 0xc7, 0xe5, 0xac, 0x2c, 0xec, 0x3d, 0xe2, 0xdb,
-	0xdf, 0xc2, 0xd2, 0xcb, 0xd0, 0x9f, 0x6a, 0x47, 0x1d, 0xa8, 0x46, 0x98, 0xd1, 0x38, 0xea, 0xca,
-	0x03, 0xa6, 0xef, 0x3a, 0x86, 0xe9, 0xbb, 0x15, 0xf1, 0xac, 0x84, 0x7e, 0x25, 0xaf, 0x96, 0xc0,
-	0x65, 0x5c, 0x2d, 0x7d, 0x85, 0xcc, 0x71, 0x8d, 0xfe, 0x14, 0x6a, 0xaf, 0x3c, 0x92, 0xb9, 0x43,
-	0x04, 0x77, 0x14, 0x4c, 0x6f, 0x30, 0x25, 0x71, 0xe3, 0xc3, 0x12, 0x37, 0x6f, 0x23, 0xf1, 0xce,
-	0xdb, 0x1a, 0x14, 0x44, 0xda, 0x51, 0x1f, 0x8a, 0xb2, 0x72, 0x20, 0xc7, 0xc9, 0x7a, 0xee, 0x38,
-	0xc9, 0x5a, 0xd4, 0x68, 0xe7, 0xc6, 0xeb, 0x63, 0x31, 0x28, 0xa9, 0xce, 0x86, 0x36, 0xb2, 0x5d,
-	0x67, 0x9e, 0x1c, 0x8d, 0x2f, 0x6f, 0xe6, 0xa4, 0x37, 0x55, 0xc7, 0x8b, 0x78, 0xce, 0xe3, 0x5d,
-	0xc9, 0x21, 0xe7, 0xf1, 0x12, 0xb2, 0x70, 0xa1, 0xa4, 0xfa, 0x20, 0x5a, 0x99, 0xe1, 0xe2, 0x99,
-	0x78, 0xfb, 0x35, 0xbe, 0xc8, 0x5e, 0x72, 0xaa, 0xa3, 0x9f, 0x43, 0x7d, 0xa2, 0xb7, 0xa2, 0x47,
-	0x79, 0x97, 0x98, 0xec, 0xae, 0xb7, 0xd8, 0xfa, 0x0d, 0x54, 0x46, 0x75, 0x04, 0xad, 0x67, 0x7b,
-	0x4f, 0x95, 0xa7, 0x46, 0xe7, 0x26, 0x2e, 0x7a, 0xcb, 0xc7, 0x50, 0xdc, 0xf3, 0x62, 0x96, 0x9e,
-	0xc0, 0x94, 0x71, 0xf4, 0x04, 0x4a, 0x2e, 0x66, 0xf1, 0xf0, 0xe6, 0x9e, 0x3f, 0x03, 0x24, 0xde,
-	0x6a, 0x8f, 0x73, 0x48, 0xec, 0xba, 0x3a, 0x98, 0xba, 0xfc, 0x73, 0x28, 0x88, 0x46, 0x82, 0x1e,
-	0x66, 0x2f, 0x9c, 0x68, 0x38, 0xa9, 0xcb, 0x1d, 0x40, 0x41, 0xbc, 0x3f, 0x50, 0x8e, 0xab, 0x30,
-	0xfb, 0xc2, 0x4a, 0x5d, 0xf5, 0x15, 0x54, 0xaf, 0x9e, 0x2f, 0x28, 0x07, 0x6f, 0xd3, 0x6f, 0x9d,
-	0xd4, 0x85, 0xf7, 0xa1, 0xac, 0xbb, 0x1e, 0xca, 0xa1, 0xbf, 0xc9, 0x06, 0x99, 0xba, 0xe8, 0x0f,
-	0x50, 0x19, 0xb5, 0x8b, 0x54, 0xb6, 0x73, 0x1c, 0x62, 0xa6, 0xe5, 0xbc, 0x84, 0x92, 0xea, 0x2b,
-	0x79, 0xaa, 0xd3, 0x4c, 0x07, 0x4a, 0x0d, 0x17, 0x43, 0x41, 0xd4, 0xf6, 0x3c, 0x0a, 0x48, 0xb4,
-	0x8a, 0x86, 0x93, 0x17, 0xae, 0xa2, 0xdf, 0x76, 0x2f, 0xde, 0x37, 0xe7, 0xfe, 0x7e, 0xdf, 0x9c,
-	0xfb, 0xed, 0xb2, 0x69, 0x5c, 0x5c, 0x36, 0x8d, 0xbf, 0x2e, 0x9b, 0xc6, 0xbf, 0x97, 0x4d, 0xe3,
-	0xc7, 0x27, 0xb7, 0xf8, 0x27, 0xf8, 0x1b, 0xf1, 0xfb, 0xda, 0x3c, 0x2a, 0xc9, 0xc3, 0x6c, 0xfc,
-	0x1f, 0x00, 0x00, 0xff, 0xff, 0x64, 0x52, 0x86, 0xc0, 0x49, 0x0f, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go
index ebfc3b8..a24dbad 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go
@@ -1,29 +1,16 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/runtime/v2/runc/options/oci.proto
 
-/*
-	Package options is a generated protocol buffer package.
-
-	It is generated from these files:
-		github.com/containerd/containerd/runtime/v2/runc/options/oci.proto
-
-	It has these top-level messages:
-		Options
-		CheckpointOptions
-		ProcessDetails
-*/
 package options
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -58,12 +45,43 @@
 	// criu image path
 	CriuImagePath string `protobuf:"bytes,10,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"`
 	// criu work path
-	CriuWorkPath string `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
+	CriuWorkPath         string   `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *Options) Reset()                    { *m = Options{} }
-func (*Options) ProtoMessage()               {}
-func (*Options) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{0} }
+func (m *Options) Reset()      { *m = Options{} }
+func (*Options) ProtoMessage() {}
+func (*Options) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4e5440d739e9a863, []int{0}
+}
+func (m *Options) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Options.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Options) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Options.Merge(m, src)
+}
+func (m *Options) XXX_Size() int {
+	return m.Size()
+}
+func (m *Options) XXX_DiscardUnknown() {
+	xxx_messageInfo_Options.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Options proto.InternalMessageInfo
 
 type CheckpointOptions struct {
 	// exit the container after a checkpoint
@@ -77,33 +95,141 @@
 	// allow checkpointing of file locks
 	FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
 	// restore provided namespaces as empty namespaces
-	EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
+	EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"`
 	// set the cgroups mode, soft, full, strict
 	CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
 	// checkpoint image path
 	ImagePath string `protobuf:"bytes,8,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"`
 	// checkpoint work path
-	WorkPath string `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
+	WorkPath             string   `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *CheckpointOptions) Reset()                    { *m = CheckpointOptions{} }
-func (*CheckpointOptions) ProtoMessage()               {}
-func (*CheckpointOptions) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{1} }
+func (m *CheckpointOptions) Reset()      { *m = CheckpointOptions{} }
+func (*CheckpointOptions) ProtoMessage() {}
+func (*CheckpointOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4e5440d739e9a863, []int{1}
+}
+func (m *CheckpointOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CheckpointOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CheckpointOptions.Merge(m, src)
+}
+func (m *CheckpointOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *CheckpointOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_CheckpointOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CheckpointOptions proto.InternalMessageInfo
 
 type ProcessDetails struct {
 	// exec process id if the process is managed by a shim
-	ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	ExecID               string   `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ProcessDetails) Reset()                    { *m = ProcessDetails{} }
-func (*ProcessDetails) ProtoMessage()               {}
-func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorOci, []int{2} }
+func (m *ProcessDetails) Reset()      { *m = ProcessDetails{} }
+func (*ProcessDetails) ProtoMessage() {}
+func (*ProcessDetails) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4e5440d739e9a863, []int{2}
+}
+func (m *ProcessDetails) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ProcessDetails) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProcessDetails.Merge(m, src)
+}
+func (m *ProcessDetails) XXX_Size() int {
+	return m.Size()
+}
+func (m *ProcessDetails) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProcessDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo
 
 func init() {
 	proto.RegisterType((*Options)(nil), "containerd.runc.v1.Options")
 	proto.RegisterType((*CheckpointOptions)(nil), "containerd.runc.v1.CheckpointOptions")
 	proto.RegisterType((*ProcessDetails)(nil), "containerd.runc.v1.ProcessDetails")
 }
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/runtime/v2/runc/options/oci.proto", fileDescriptor_4e5440d739e9a863)
+}
+
+var fileDescriptor_4e5440d739e9a863 = []byte{
+	// 587 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6e, 0xd3, 0x40,
+	0x10, 0x87, 0xeb, 0xfe, 0x49, 0xec, 0x4d, 0x93, 0xc2, 0x42, 0x25, 0xd3, 0x8a, 0x34, 0x94, 0x82,
+	0xc2, 0x25, 0x11, 0x45, 0x9c, 0xb8, 0xa0, 0xb6, 0x08, 0x55, 0x40, 0xa9, 0x0c, 0x15, 0xa8, 0x97,
+	0x95, 0xbb, 0x1e, 0x9c, 0x51, 0xe2, 0x1d, 0xcb, 0xbb, 0x69, 0xd2, 0x1b, 0xef, 0xc5, 0x0b, 0xf4,
+	0xc8, 0x91, 0x13, 0xa2, 0xb9, 0xf1, 0x16, 0x68, 0xd7, 0x4e, 0xdb, 0x33, 0x27, 0xcf, 0x7e, 0xf3,
+	0xf3, 0x78, 0xfd, 0xad, 0x96, 0xed, 0xa5, 0x68, 0x06, 0xe3, 0xb3, 0x9e, 0xa4, 0xac, 0x2f, 0x49,
+	0x99, 0x18, 0x15, 0x14, 0xc9, 0xed, 0xb2, 0x18, 0x2b, 0x83, 0x19, 0xf4, 0xcf, 0x77, 0x6d, 0x29,
+	0xfb, 0x94, 0x1b, 0x24, 0xa5, 0xfb, 0x24, 0xb1, 0x97, 0x17, 0x64, 0x88, 0xf3, 0x9b, 0x74, 0xcf,
+	0x46, 0x7a, 0xe7, 0xcf, 0x37, 0xee, 0xa7, 0x94, 0x92, 0x6b, 0xf7, 0x6d, 0x55, 0x26, 0xb7, 0xff,
+	0x2e, 0xb2, 0xfa, 0xc7, 0xf2, 0x7d, 0xbe, 0xcd, 0x9a, 0x8a, 0x44, 0x8e, 0xe7, 0x64, 0x44, 0x41,
+	0x64, 0x42, 0xaf, 0xe3, 0x75, 0xfd, 0xa8, 0xa1, 0xe8, 0xd8, 0xb2, 0x88, 0xc8, 0xf0, 0x1d, 0xd6,
+	0x52, 0x24, 0x14, 0x4c, 0xc4, 0x10, 0x2e, 0x0a, 0x54, 0x69, 0xb8, 0xe8, 0x42, 0xab, 0x8a, 0x8e,
+	0x60, 0xf2, 0xae, 0x64, 0x7c, 0x8b, 0x35, 0xf4, 0x00, 0x33, 0x21, 0xd3, 0x82, 0xc6, 0x79, 0xb8,
+	0xd4, 0xf1, 0xba, 0x41, 0xc4, 0x2c, 0xda, 0x77, 0x84, 0xaf, 0xb3, 0x1a, 0x92, 0x18, 0x63, 0x12,
+	0x2e, 0x77, 0xbc, 0x6e, 0x33, 0x5a, 0x41, 0x3a, 0xc1, 0xa4, 0xc2, 0x29, 0x26, 0xe1, 0xca, 0x1c,
+	0xbf, 0xc5, 0xc4, 0x8e, 0x3b, 0x43, 0x15, 0x17, 0x17, 0x42, 0xc5, 0x19, 0x84, 0xb5, 0x72, 0x5c,
+	0x89, 0x8e, 0xe2, 0x0c, 0x38, 0x67, 0xcb, 0x6e, 0xc3, 0x75, 0xd7, 0x71, 0x35, 0xdf, 0x64, 0x81,
+	0x2c, 0x70, 0x2c, 0xf2, 0xd8, 0x0c, 0x42, 0xdf, 0x35, 0x7c, 0x0b, 0x8e, 0x63, 0x33, 0xe0, 0x4f,
+	0x58, 0x4b, 0x5f, 0x68, 0x03, 0x59, 0x32, 0xdf, 0x63, 0xe0, 0x7e, 0xa3, 0x59, 0xd1, 0x6a, 0x9b,
+	0x4f, 0xd9, 0x9a, 0x9b, 0x81, 0x59, 0x9c, 0x42, 0x39, 0x89, 0xb9, 0x49, 0x4d, 0x8b, 0x0f, 0x2d,
+	0x75, 0xe3, 0x76, 0x58, 0xcb, 0xe5, 0x26, 0x54, 0x0c, 0xcb, 0x58, 0xc3, 0xc5, 0x56, 0x2d, 0xfd,
+	0x42, 0xc5, 0xd0, 0xa6, 0xb6, 0x7f, 0x2c, 0xb2, 0xbb, 0xfb, 0x03, 0x90, 0xc3, 0x9c, 0x50, 0x99,
+	0xb9, 0x75, 0xce, 0x96, 0x61, 0x8a, 0x73, 0xd9, 0xae, 0xe6, 0x0f, 0x98, 0x4f, 0x39, 0x28, 0x61,
+	0x64, 0x5e, 0xf9, 0xad, 0xdb, 0xf5, 0x67, 0x99, 0xf3, 0x5d, 0xb6, 0x0e, 0x53, 0x03, 0x85, 0x8a,
+	0x47, 0x62, 0xac, 0x70, 0x2a, 0x34, 0xc9, 0x21, 0x18, 0xed, 0x24, 0xfb, 0xd1, 0xbd, 0x79, 0xf3,
+	0x44, 0xe1, 0xf4, 0x53, 0xd9, 0xe2, 0x1b, 0xcc, 0x37, 0x50, 0x64, 0xa8, 0xe2, 0x91, 0xf3, 0xed,
+	0x47, 0xd7, 0x6b, 0xfe, 0x90, 0xb1, 0x6f, 0x38, 0x02, 0x31, 0x22, 0x39, 0xd4, 0x4e, 0xbb, 0x1f,
+	0x05, 0x96, 0xbc, 0xb7, 0x80, 0x3f, 0x63, 0x77, 0x20, 0xcb, 0x4d, 0x69, 0x5e, 0xe7, 0xb1, 0x04,
+	0x1d, 0xd6, 0x3a, 0x4b, 0xdd, 0x20, 0x5a, 0x73, 0xfc, 0xe8, 0x1a, 0xf3, 0x47, 0x6c, 0xb5, 0x74,
+	0xa9, 0x45, 0x46, 0x09, 0x54, 0x87, 0xd1, 0xa8, 0xd8, 0x07, 0x4a, 0xc0, 0x7e, 0xec, 0x96, 0xca,
+	0xf2, 0x50, 0x02, 0xbc, 0xd6, 0xb8, 0xc9, 0x82, 0x1b, 0x83, 0x41, 0x79, 0x64, 0x93, 0xb9, 0xbd,
+	0x97, 0xac, 0x75, 0x5c, 0x90, 0x04, 0xad, 0x0f, 0xc0, 0xc4, 0x38, 0xd2, 0xfc, 0x31, 0xab, 0xc3,
+	0x14, 0xa4, 0xc0, 0xc4, 0xc9, 0x0b, 0xf6, 0xd8, 0xec, 0xf7, 0x56, 0xed, 0xcd, 0x14, 0xe4, 0xe1,
+	0x41, 0x54, 0xb3, 0xad, 0xc3, 0x64, 0xef, 0xf4, 0xf2, 0xaa, 0xbd, 0xf0, 0xeb, 0xaa, 0xbd, 0xf0,
+	0x7d, 0xd6, 0xf6, 0x2e, 0x67, 0x6d, 0xef, 0xe7, 0xac, 0xed, 0xfd, 0x99, 0xb5, 0xbd, 0xd3, 0xd7,
+	0xff, 0x7b, 0xd1, 0x5e, 0x55, 0xcf, 0xaf, 0x0b, 0x67, 0x35, 0x77, 0x8b, 0x5e, 0xfc, 0x0b, 0x00,
+	0x00, 0xff, 0xff, 0x90, 0x50, 0x79, 0xf2, 0xb5, 0x03, 0x00, 0x00,
+}
+
 func (m *Options) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -195,6 +321,9 @@
 		i = encodeVarintOci(dAtA, i, uint64(len(m.CriuWorkPath)))
 		i += copy(dAtA[i:], m.CriuWorkPath)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -296,6 +425,9 @@
 		i = encodeVarintOci(dAtA, i, uint64(len(m.WorkPath)))
 		i += copy(dAtA[i:], m.WorkPath)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -320,6 +452,9 @@
 		i = encodeVarintOci(dAtA, i, uint64(len(m.ExecID)))
 		i += copy(dAtA[i:], m.ExecID)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -333,6 +468,9 @@
 	return offset + 1
 }
 func (m *Options) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.NoPivotRoot {
@@ -374,10 +512,16 @@
 	if l > 0 {
 		n += 1 + l + sovOci(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *CheckpointOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Exit {
@@ -413,16 +557,25 @@
 	if l > 0 {
 		n += 1 + l + sovOci(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ProcessDetails) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ExecID)
 	if l > 0 {
 		n += 1 + l + sovOci(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -455,6 +608,7 @@
 		`SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`,
 		`CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`,
 		`CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -473,6 +627,7 @@
 		`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
 		`ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`,
 		`WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -483,6 +638,7 @@
 	}
 	s := strings.Join([]string{`&ProcessDetails{`,
 		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -510,7 +666,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -538,7 +694,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -558,7 +714,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -578,7 +734,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -588,6 +744,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -607,7 +766,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.IoUid |= (uint32(b) & 0x7F) << shift
+				m.IoUid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -626,7 +785,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.IoGid |= (uint32(b) & 0x7F) << shift
+				m.IoGid |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -645,7 +804,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -655,6 +814,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -674,7 +836,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -684,6 +846,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -703,7 +868,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -713,6 +878,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -732,7 +900,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -752,7 +920,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -762,6 +930,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -781,7 +952,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -791,6 +962,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -805,9 +979,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthOci
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthOci
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -832,7 +1010,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -860,7 +1038,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -880,7 +1058,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -900,7 +1078,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -920,7 +1098,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -940,7 +1118,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -960,7 +1138,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -970,6 +1148,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -989,7 +1170,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -999,6 +1180,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1018,7 +1202,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1028,6 +1212,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1047,7 +1234,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1057,6 +1244,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1071,9 +1261,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthOci
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthOci
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1098,7 +1292,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1126,7 +1320,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1136,6 +1330,9 @@
 				return ErrInvalidLengthOci
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOci
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1150,9 +1347,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthOci
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthOci
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -1216,10 +1417,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthOci
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthOci
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1248,6 +1452,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthOci
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1266,48 +1473,3 @@
 	ErrInvalidLengthOci = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowOci   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/runtime/v2/runc/options/oci.proto", fileDescriptorOci)
-}
-
-var fileDescriptorOci = []byte{
-	// 587 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6e, 0xd3, 0x40,
-	0x10, 0x87, 0xeb, 0xfe, 0x49, 0xec, 0x4d, 0x93, 0xc2, 0x42, 0x25, 0xd3, 0x8a, 0x34, 0x94, 0x82,
-	0xc2, 0x25, 0x11, 0x45, 0x9c, 0xb8, 0xa0, 0xb6, 0x08, 0x55, 0x40, 0xa9, 0x0c, 0x15, 0xa8, 0x97,
-	0x95, 0xbb, 0x1e, 0x9c, 0x51, 0xe2, 0x1d, 0xcb, 0xbb, 0x69, 0xd2, 0x1b, 0xef, 0xc5, 0x0b, 0xf4,
-	0xc8, 0x91, 0x13, 0xa2, 0xb9, 0xf1, 0x16, 0x68, 0xd7, 0x4e, 0xdb, 0x33, 0x27, 0xcf, 0x7e, 0xf3,
-	0xf3, 0x78, 0xfd, 0xad, 0x96, 0xed, 0xa5, 0x68, 0x06, 0xe3, 0xb3, 0x9e, 0xa4, 0xac, 0x2f, 0x49,
-	0x99, 0x18, 0x15, 0x14, 0xc9, 0xed, 0xb2, 0x18, 0x2b, 0x83, 0x19, 0xf4, 0xcf, 0x77, 0x6d, 0x29,
-	0xfb, 0x94, 0x1b, 0x24, 0xa5, 0xfb, 0x24, 0xb1, 0x97, 0x17, 0x64, 0x88, 0xf3, 0x9b, 0x74, 0xcf,
-	0x46, 0x7a, 0xe7, 0xcf, 0x37, 0xee, 0xa7, 0x94, 0x92, 0x6b, 0xf7, 0x6d, 0x55, 0x26, 0xb7, 0xff,
-	0x2e, 0xb2, 0xfa, 0xc7, 0xf2, 0x7d, 0xbe, 0xcd, 0x9a, 0x8a, 0x44, 0x8e, 0xe7, 0x64, 0x44, 0x41,
-	0x64, 0x42, 0xaf, 0xe3, 0x75, 0xfd, 0xa8, 0xa1, 0xe8, 0xd8, 0xb2, 0x88, 0xc8, 0xf0, 0x1d, 0xd6,
-	0x52, 0x24, 0x14, 0x4c, 0xc4, 0x10, 0x2e, 0x0a, 0x54, 0x69, 0xb8, 0xe8, 0x42, 0xab, 0x8a, 0x8e,
-	0x60, 0xf2, 0xae, 0x64, 0x7c, 0x8b, 0x35, 0xf4, 0x00, 0x33, 0x21, 0xd3, 0x82, 0xc6, 0x79, 0xb8,
-	0xd4, 0xf1, 0xba, 0x41, 0xc4, 0x2c, 0xda, 0x77, 0x84, 0xaf, 0xb3, 0x1a, 0x92, 0x18, 0x63, 0x12,
-	0x2e, 0x77, 0xbc, 0x6e, 0x33, 0x5a, 0x41, 0x3a, 0xc1, 0xa4, 0xc2, 0x29, 0x26, 0xe1, 0xca, 0x1c,
-	0xbf, 0xc5, 0xc4, 0x8e, 0x3b, 0x43, 0x15, 0x17, 0x17, 0x42, 0xc5, 0x19, 0x84, 0xb5, 0x72, 0x5c,
-	0x89, 0x8e, 0xe2, 0x0c, 0x38, 0x67, 0xcb, 0x6e, 0xc3, 0x75, 0xd7, 0x71, 0x35, 0xdf, 0x64, 0x81,
-	0x2c, 0x70, 0x2c, 0xf2, 0xd8, 0x0c, 0x42, 0xdf, 0x35, 0x7c, 0x0b, 0x8e, 0x63, 0x33, 0xe0, 0x4f,
-	0x58, 0x4b, 0x5f, 0x68, 0x03, 0x59, 0x32, 0xdf, 0x63, 0xe0, 0x7e, 0xa3, 0x59, 0xd1, 0x6a, 0x9b,
-	0x4f, 0xd9, 0x9a, 0x9b, 0x81, 0x59, 0x9c, 0x42, 0x39, 0x89, 0xb9, 0x49, 0x4d, 0x8b, 0x0f, 0x2d,
-	0x75, 0xe3, 0x76, 0x58, 0xcb, 0xe5, 0x26, 0x54, 0x0c, 0xcb, 0x58, 0xc3, 0xc5, 0x56, 0x2d, 0xfd,
-	0x42, 0xc5, 0xd0, 0xa6, 0xb6, 0x7f, 0x2c, 0xb2, 0xbb, 0xfb, 0x03, 0x90, 0xc3, 0x9c, 0x50, 0x99,
-	0xb9, 0x75, 0xce, 0x96, 0x61, 0x8a, 0x73, 0xd9, 0xae, 0xe6, 0x0f, 0x98, 0x4f, 0x39, 0x28, 0x61,
-	0x64, 0x5e, 0xf9, 0xad, 0xdb, 0xf5, 0x67, 0x99, 0xf3, 0x5d, 0xb6, 0x0e, 0x53, 0x03, 0x85, 0x8a,
-	0x47, 0x62, 0xac, 0x70, 0x2a, 0x34, 0xc9, 0x21, 0x18, 0xed, 0x24, 0xfb, 0xd1, 0xbd, 0x79, 0xf3,
-	0x44, 0xe1, 0xf4, 0x53, 0xd9, 0xe2, 0x1b, 0xcc, 0x37, 0x50, 0x64, 0xa8, 0xe2, 0x91, 0xf3, 0xed,
-	0x47, 0xd7, 0x6b, 0xfe, 0x90, 0xb1, 0x6f, 0x38, 0x02, 0x31, 0x22, 0x39, 0xd4, 0x4e, 0xbb, 0x1f,
-	0x05, 0x96, 0xbc, 0xb7, 0x80, 0x3f, 0x63, 0x77, 0x20, 0xcb, 0x4d, 0x69, 0x5e, 0xe7, 0xb1, 0x04,
-	0x1d, 0xd6, 0x3a, 0x4b, 0xdd, 0x20, 0x5a, 0x73, 0xfc, 0xe8, 0x1a, 0xf3, 0x47, 0x6c, 0xb5, 0x74,
-	0xa9, 0x45, 0x46, 0x09, 0x54, 0x87, 0xd1, 0xa8, 0xd8, 0x07, 0x4a, 0xc0, 0x7e, 0xec, 0x96, 0xca,
-	0xf2, 0x50, 0x02, 0xbc, 0xd6, 0xb8, 0xc9, 0x82, 0x1b, 0x83, 0x41, 0x79, 0x64, 0x93, 0xb9, 0xbd,
-	0x97, 0xac, 0x75, 0x5c, 0x90, 0x04, 0xad, 0x0f, 0xc0, 0xc4, 0x38, 0xd2, 0xfc, 0x31, 0xab, 0xc3,
-	0x14, 0xa4, 0xc0, 0xc4, 0xc9, 0x0b, 0xf6, 0xd8, 0xec, 0xf7, 0x56, 0xed, 0xcd, 0x14, 0xe4, 0xe1,
-	0x41, 0x54, 0xb3, 0xad, 0xc3, 0x64, 0xef, 0xf4, 0xf2, 0xaa, 0xbd, 0xf0, 0xeb, 0xaa, 0xbd, 0xf0,
-	0x7d, 0xd6, 0xf6, 0x2e, 0x67, 0x6d, 0xef, 0xe7, 0xac, 0xed, 0xfd, 0x99, 0xb5, 0xbd, 0xd3, 0xd7,
-	0xff, 0x7b, 0xd1, 0x5e, 0x55, 0xcf, 0xaf, 0x0b, 0x67, 0x35, 0x77, 0x8b, 0x5e, 0xfc, 0x0b, 0x00,
-	0x00, 0xff, 0xff, 0x90, 0x50, 0x79, 0xf2, 0xb5, 0x03, 0x00, 0x00,
-}
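
The regenerated oci.pb.go above preserves unknown wire data in XXX_unrecognized and adds the postIndex < 0 and (iNdEx + skippy) < 0 guards. Those guards exist because a hostile varint length can overflow the index arithmetic, slipping past the usual bounds check. The following self-contained Go sketch is not part of the patch; it reproduces the guarded pattern in isolation with made-up names to show what the extra check prevents.

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

    // boundedSlice mimics the guarded index arithmetic added in oci.pb.go.
    func boundedSlice(data []byte, iNdEx int, length int) ([]byte, error) {
    	if length < 0 {
    		return nil, errInvalidLength
    	}
    	postIndex := iNdEx + length
    	if postIndex < 0 { // overflow guard added by the regenerated code
    		return nil, errInvalidLength
    	}
    	if postIndex > len(data) {
    		return nil, errors.New("unexpected EOF")
    	}
    	return data[iNdEx:postIndex], nil
    }

    func main() {
    	data := make([]byte, 16)
    	// length = MaxInt, so iNdEx+length wraps negative; without the guard
    	// the slice expression below it would panic instead of erroring.
    	_, err := boundedSlice(data, 8, int(^uint(0)>>1))
    	fmt.Println(err) // proto: negative length found during unmarshaling
    }
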
diff --git a/vendor/github.com/containerd/containerd/services/server/config/config.go b/vendor/github.com/containerd/containerd/services/server/config/config.go
index 27cebf5..26fb925 100644
--- a/vendor/github.com/containerd/containerd/services/server/config/config.go
+++ b/vendor/github.com/containerd/containerd/services/server/config/config.go
@@ -28,6 +28,8 @@
 	Root string `toml:"root"`
 	// State is the path to a directory where containerd will store transient data
 	State string `toml:"state"`
+	// PluginDir is the directory for dynamic plugins to be stored
+	PluginDir string `toml:"plugin_dir"`
 	// GRPC configuration settings
 	GRPC GRPCConfig `toml:"grpc"`
 	// Debug and profiling settings
@@ -37,6 +39,9 @@
 	// DisabledPlugins are IDs of plugins to disable. Disabled plugins won't be
 	// initialized and started.
 	DisabledPlugins []string `toml:"disabled_plugins"`
+	// RequiredPlugins are IDs of required plugins. Containerd exits if any
+	// required plugin doesn't exist or fails to be initialized or started.
+	RequiredPlugins []string `toml:"required_plugins"`
 	// Plugins provides plugin specific configuration for the initialization of a plugin
 	Plugins map[string]toml.Primitive `toml:"plugins"`
 	// OOMScore adjust the containerd's oom score
@@ -52,6 +57,9 @@
 // GRPCConfig provides GRPC configuration for the socket
 type GRPCConfig struct {
 	Address        string `toml:"address"`
+	TCPAddress     string `toml:"tcp_address"`
+	TCPTLSCert     string `toml:"tcp_tls_cert"`
+	TCPTLSKey      string `toml:"tcp_tls_key"`
 	UID            int    `toml:"uid"`
 	GID            int    `toml:"gid"`
 	MaxRecvMsgSize int    `toml:"max_recv_message_size"`
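
For reference, a minimal sketch of how the new configuration surface could be consumed with the vendored BurntSushi/toml package. The serverConfig struct below only mirrors the toml tags shown in this hunk (it is not the real containerd config type), and the paths, plugin ID, and addresses are invented examples.

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    // serverConfig mirrors the fields added above: plugin_dir, required_plugins,
    // and the TCP/TLS settings of the grpc table.
    type serverConfig struct {
    	PluginDir       string   `toml:"plugin_dir"`
    	RequiredPlugins []string `toml:"required_plugins"`
    	GRPC            struct {
    		Address    string `toml:"address"`
    		TCPAddress string `toml:"tcp_address"`
    		TCPTLSCert string `toml:"tcp_tls_cert"`
    		TCPTLSKey  string `toml:"tcp_tls_key"`
    	} `toml:"grpc"`
    }

    func main() {
    	// Example values only; not taken from any real deployment.
    	const data = `
    plugin_dir = "/var/lib/containerd/plugins"
    required_plugins = ["io.containerd.grpc.v1.cri"]

    [grpc]
      address = "/run/containerd/containerd.sock"
      tcp_address = "0.0.0.0:8888"
      tcp_tls_cert = "/etc/containerd/server.crt"
      tcp_tls_key = "/etc/containerd/server.key"
    `
    	var cfg serverConfig
    	if _, err := toml.Decode(data, &cfg); err != nil {
    		panic(err)
    	}
    	fmt.Println(cfg.RequiredPlugins, cfg.GRPC.TCPAddress)
    }
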
diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go
index 700f44e..d8329af 100644
--- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go
+++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go
@@ -24,3 +24,8 @@
 func ForceRemoveAll(path string) error {
 	return os.RemoveAll(path)
 }
+
+// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems.
+func MkdirAllWithACL(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
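
With this Unix stub in place, callers can rely on a single MkdirAllWithACL call on every platform. A minimal usage sketch follows; the directory path is an arbitrary example.

    package main

    import (
    	"log"

    	"github.com/containerd/containerd/sys"
    )

    func main() {
    	// On Windows this creates the tree ACL'd for Administrators/Local System;
    	// on Unix it is simply os.MkdirAll(path, 0700).
    	if err := sys.MkdirAllWithACL("/var/lib/example-daemon/state", 0700); err != nil {
    		log.Fatal(err)
    	}
    }
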
diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go
index dc880c3..1bdc531 100644
--- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go
+++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go
@@ -30,6 +30,11 @@
 	"github.com/Microsoft/hcsshim"
 )
 
+const (
+	// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+	SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
 // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
 // ACL'd for Builtin Administrators and Local System.
 func MkdirAllWithACL(path string, perm os.FileMode) error {
@@ -78,7 +83,7 @@
 
 	if j > 1 {
 		// Create parent
-		err = mkdirall(path[0:j-1], false)
+		err = mkdirall(path[0:j-1], adminAndLocalSystem)
 		if err != nil {
 			return err
 		}
@@ -112,8 +117,7 @@
 // and Local System.
 func mkdirWithACL(name string) error {
 	sa := syscall.SecurityAttributes{Length: 0}
-	sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
-	sd, err := winio.SddlToSecurityDescriptor(sddl)
+	sd, err := winio.SddlToSecurityDescriptor(SddlAdministratorsLocalSystem)
 	if err != nil {
 		return &os.PathError{Op: "mkdir", Path: name, Err: err}
 	}
diff --git a/vendor/github.com/containerd/containerd/sys/oom_unix.go b/vendor/github.com/containerd/containerd/sys/oom_unix.go
index 7192efe..54412e9 100644
--- a/vendor/github.com/containerd/containerd/sys/oom_unix.go
+++ b/vendor/github.com/containerd/containerd/sys/oom_unix.go
@@ -20,8 +20,10 @@
 
 import (
 	"fmt"
+	"io/ioutil"
 	"os"
 	"strconv"
+	"strings"
 
 	"github.com/opencontainers/runc/libcontainer/system"
 )
@@ -45,3 +47,13 @@
 	}
 	return nil
 }
+
+// GetOOMScoreAdj gets the oom score for a process
+func GetOOMScoreAdj(pid int) (int, error) {
+	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.Atoi(strings.TrimSpace(string(data)))
+}
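
A minimal sketch of the new helper, reading the oom_score_adj of the current process; it assumes a Linux host with procfs mounted.

    package main

    import (
    	"fmt"
    	"log"
    	"os"

    	"github.com/containerd/containerd/sys"
    )

    func main() {
    	pid := os.Getpid()
    	// GetOOMScoreAdj reads /proc/<pid>/oom_score_adj and parses the value.
    	score, err := sys.GetOOMScoreAdj(pid)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("pid %d oom_score_adj=%d\n", pid, score)
    }
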
diff --git a/vendor/github.com/containerd/containerd/sys/oom_windows.go b/vendor/github.com/containerd/containerd/sys/oom_windows.go
index f44bceb..a917ba6 100644
--- a/vendor/github.com/containerd/containerd/sys/oom_windows.go
+++ b/vendor/github.com/containerd/containerd/sys/oom_windows.go
@@ -22,3 +22,10 @@
 func SetOOMScore(pid, score int) error {
 	return nil
 }
+
+// GetOOMScoreAdj gets the oom score for a process
+//
+// Not implemented on Windows
+func GetOOMScoreAdj(pid int) (int, error) {
+	return 0, nil
+}
diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go
index c81aa37..fadb2db 100644
--- a/vendor/github.com/containerd/containerd/task.go
+++ b/vendor/github.com/containerd/containerd/task.go
@@ -521,6 +521,9 @@
 }
 
 func (t *task) LoadProcess(ctx context.Context, id string, ioAttach cio.Attach) (Process, error) {
+	if id == t.id && ioAttach == nil {
+		return t, nil
+	}
 	response, err := t.client.TaskService().Get(ctx, &tasks.GetRequest{
 		ContainerID: t.id,
 		ExecID:      id,
@@ -582,6 +585,7 @@
 				OS:           goruntime.GOOS,
 				Architecture: goruntime.GOARCH,
 			},
+			Annotations: d.Annotations,
 		})
 	}
 	return nil
diff --git a/vendor/github.com/containerd/containerd/task_opts.go b/vendor/github.com/containerd/containerd/task_opts.go
index c0e98b3..07e094c 100644
--- a/vendor/github.com/containerd/containerd/task_opts.go
+++ b/vendor/github.com/containerd/containerd/task_opts.go
@@ -59,9 +59,10 @@
 		for _, m := range index.Manifests {
 			if m.MediaType == images.MediaTypeContainerd1Checkpoint {
 				info.Checkpoint = &types.Descriptor{
-					MediaType: m.MediaType,
-					Size_:     m.Size,
-					Digest:    m.Digest,
+					MediaType:   m.MediaType,
+					Size_:       m.Size,
+					Digest:      m.Digest,
+					Annotations: m.Annotations,
 				}
 				return nil
 			}
diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf
index 1d8c1b0..dd19f04 100644
--- a/vendor/github.com/containerd/containerd/vendor.conf
+++ b/vendor/github.com/containerd/containerd/vendor.conf
@@ -1,52 +1,51 @@
 github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
-github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
-github.com/containerd/cgroups dbea6f2bd41658b84b00417ceefa416b979cbf10
+github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
+github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
-github.com/containerd/btrfs 2e1aa0ddf94f91fa282b6ed87c23bf0d64911244
+github.com/containerd/btrfs af5082808c833de0e79c1e72eea9fea239364877
 github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
 github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
 github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/docker/go-units v0.3.1
+github.com/docker/go-units v0.4.0
 github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
 github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
 github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
-github.com/matttproud/golang_protobuf_extensions v1.0.0
-github.com/gogo/protobuf v1.0.0
-github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
-github.com/golang/protobuf v1.1.0
+github.com/matttproud/golang_protobuf_extensions v1.0.1
+github.com/gogo/protobuf v1.2.1
+github.com/gogo/googleapis v1.2.0
+github.com/golang/protobuf v1.2.0
 github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db
-github.com/opencontainers/runc 2b18fe1d885ee5083ef9f0838fee39b62d653e30
+github.com/opencontainers/runc 029124da7af7360afa781a0234d1b083550f797c
 github.com/konsorten/go-windows-terminal-sequences v1.0.1
-github.com/sirupsen/logrus v1.3.0
+github.com/sirupsen/logrus v1.4.1
 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
 golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
 google.golang.org/grpc v1.12.0
-github.com/pkg/errors v0.8.0
+github.com/pkg/errors v0.8.1
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
 golang.org/x/sys d455e41777fca6e8a5a79e34a14b8368bc11d9ba https://github.com/golang/sys
 github.com/opencontainers/image-spec v1.0.1
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
-github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
+github.com/BurntSushi/toml v0.3.1
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
-github.com/Microsoft/go-winio v0.4.12
-github.com/Microsoft/hcsshim v0.8.5
+github.com/Microsoft/go-winio 84b4ab48a50763fe7b3abcef38e5205c12027fac
+github.com/Microsoft/hcsshim 8abdbb8205e4192c68b5f84c31197156f31be517
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
 github.com/containerd/ttrpc f02858b1457c5ca3aaec3a0803eb0d59f96e41d6
-github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
-gotest.tools v2.1.0
-github.com/google/go-cmp v0.1.0
+github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
+gotest.tools v2.3.0
+github.com/google/go-cmp v0.2.0
 go.etcd.io/bbolt v1.3.2
 
 # cri dependencies
-github.com/containerd/cri 4dd6735020f5596dd41738f8c4f5cb07fa804c5e # master
+github.com/containerd/cri 6d353571e64417d80c9478ffaea793714dd539d0 # master
 github.com/containerd/go-cni 40bcf8ec8acd7372be1d77031d585d5d8e561c90
-github.com/blang/semver v3.1.0
 github.com/containernetworking/cni v0.6.0
 github.com/containernetworking/plugins v0.7.0
 github.com/davecgh/go-spew v1.1.0
@@ -60,31 +59,27 @@
 github.com/json-iterator/go 1.1.5
 github.com/modern-go/reflect2 1.0.1
 github.com/modern-go/concurrent 1.0.3
-github.com/opencontainers/runtime-tools v0.6.0
-github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a
+github.com/opencontainers/selinux v1.2.1
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
 github.com/tchap/go-patricia v2.2.6
-github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
-github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
-github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
-golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
+golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3
 golang.org/x/oauth2 a6bd8cefa1811bd24b86f8902872e4e8225f74c4
 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 v2.2.1
-k8s.io/api kubernetes-1.13.0
-k8s.io/apimachinery kubernetes-1.13.0
-k8s.io/apiserver kubernetes-1.13.0
-k8s.io/client-go kubernetes-1.13.0
+k8s.io/api kubernetes-1.15.0-alpha.0
+k8s.io/apimachinery kubernetes-1.15.0-alpha.0
+k8s.io/apiserver kubernetes-1.15.0-alpha.0
+k8s.io/client-go kubernetes-1.15.0-alpha.0
 k8s.io/klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
-k8s.io/kubernetes v1.13.0
-k8s.io/utils 0d26856f57b32ec3398579285e5c8a2bfe8c5243
+k8s.io/kubernetes v1.15.0-alpha.0
+k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c
 sigs.k8s.io/yaml v1.1.0
 
 # zfs dependencies
-github.com/containerd/zfs 9f6ef3b1fe5144bd91fe5855b4eba81bc0d17d03
-github.com/mistifyio/go-zfs 166add352731e515512690329794ee593f1aaff2
-github.com/pborman/uuid c65b2f87fee37d1c7854c9164a450713c28d50cd
+github.com/containerd/zfs 31af176f2ae84fe142ef2655bf7bb2aa618b3b1f
+github.com/mistifyio/go-zfs f784269be439d704d3dfa1906f45dd848fed2beb
+github.com/google/uuid v1.1.1
 
 # aufs dependencies
-github.com/containerd/aufs da3cf16bfbe68ba8f114f1536a05c01528a25434
+github.com/containerd/aufs f894a800659b6e11c1a13084abd1712f346e349c
diff --git a/vendor/github.com/containerd/continuity/fs/path.go b/vendor/github.com/containerd/continuity/fs/path.go
index 9959817..8863caa 100644
--- a/vendor/github.com/containerd/continuity/fs/path.go
+++ b/vendor/github.com/containerd/continuity/fs/path.go
@@ -22,7 +22,6 @@
 	"io"
 	"os"
 	"path/filepath"
-	"strings"
 
 	"github.com/pkg/errors"
 )
@@ -47,9 +46,8 @@
 	if upper == nil {
 		return ChangeKindDelete, lower.path
 	}
-	// TODO: compare by directory
 
-	switch i := strings.Compare(lower.path, upper.path); {
+	switch i := directoryCompare(lower.path, upper.path); {
 	case i < 0:
 		// File in lower that is not in upper
 		return ChangeKindDelete, lower.path
@@ -61,6 +59,35 @@
 	}
 }
 
+func directoryCompare(a, b string) int {
+	l := len(a)
+	if len(b) < l {
+		l = len(b)
+	}
+	for i := 0; i < l; i++ {
+		c1, c2 := a[i], b[i]
+		if c1 == filepath.Separator {
+			c1 = byte(0)
+		}
+		if c2 == filepath.Separator {
+			c2 = byte(0)
+		}
+		if c1 < c2 {
+			return -1
+		}
+		if c1 > c2 {
+			return +1
+		}
+	}
+	if len(a) < len(b) {
+		return -1
+	}
+	if len(a) > len(b) {
+		return +1
+	}
+	return 0
+}
+
 func sameFile(f1, f2 *currentPath) (bool, error) {
 	if os.SameFile(f1.f, f2.f) {
 		return true, nil
diff --git a/vendor/github.com/gogo/googleapis/LICENSE b/vendor/github.com/gogo/googleapis/LICENSE
new file mode 100644
index 0000000..d6f85b1
--- /dev/null
+++ b/vendor/github.com/gogo/googleapis/LICENSE
@@ -0,0 +1,203 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2015, Google Inc
+   Copyright 2018, GoGo Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
index 2a77c1b..e2c94ae 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
+++ b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
@@ -3,17 +3,24 @@
 
 package rpc
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import strconv "strconv"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	math "math"
+	strconv "strconv"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 // The canonical error codes for Google APIs.
 //
 //
@@ -181,6 +188,7 @@
 	14: "UNAVAILABLE",
 	15: "DATA_LOSS",
 }
+
 var Code_value = map[string]int32{
 	"OK":                  0,
 	"CANCELLED":           1,
@@ -201,22 +209,17 @@
 	"DATA_LOSS":           15,
 }
 
-func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptorCode, []int{0} }
+func (Code) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_fe593a732623ccf0, []int{0}
+}
 
 func init() {
 	proto.RegisterEnum("google.rpc.Code", Code_name, Code_value)
 }
-func (x Code) String() string {
-	s, ok := Code_name[int32(x)]
-	if ok {
-		return s
-	}
-	return strconv.Itoa(int(x))
-}
 
-func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptorCode) }
+func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_fe593a732623ccf0) }
 
-var fileDescriptorCode = []byte{
+var fileDescriptor_fe593a732623ccf0 = []byte{
 	// 393 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x91, 0x3d, 0x6e, 0x13, 0x41,
 	0x14, 0xc7, 0x3d, 0x76, 0x70, 0xe2, 0xf1, 0xd7, 0xcb, 0x84, 0x40, 0x37, 0x07, 0xa0, 0x70, 0x0a,
@@ -244,3 +247,11 @@
 	0xf3, 0xb5, 0x3f, 0x08, 0x65, 0xf1, 0x61, 0xf8, 0xb7, 0xaa, 0xd7, 0x7f, 0x02, 0x00, 0x00, 0xff,
 	0xff, 0x03, 0xd4, 0x27, 0xff, 0xc3, 0x01, 0x00, 0x00,
 }
+
+func (x Code) String() string {
+	s, ok := Code_name[int32(x)]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(x))
+}
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.proto b/vendor/github.com/gogo/googleapis/google/rpc/code.proto
index d832de1..0540a4f 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/code.proto
+++ b/vendor/github.com/gogo/googleapis/google/rpc/code.proto
@@ -22,7 +22,6 @@
 option java_package = "com.google.rpc";
 option objc_class_prefix = "RPC";
 
-
 // The canonical error codes for Google APIs.
 //
 //
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go
index 5677581..657f1da 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go
+++ b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go
@@ -3,21 +3,28 @@
 
 package rpc
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	types "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 // Describes when the clients can retry a failed request. Clients could ignore
 // the recommendation here or retry when this information is missing from error
 // responses.
@@ -33,14 +40,45 @@
 // reached.
 type RetryInfo struct {
 	// Clients should wait at least this long between retrying the same request.
-	RetryDelay *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay" json:"retry_delay,omitempty"`
+	RetryDelay           *types.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
 }
 
-func (m *RetryInfo) Reset()                    { *m = RetryInfo{} }
-func (*RetryInfo) ProtoMessage()               {}
-func (*RetryInfo) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{0} }
+func (m *RetryInfo) Reset()      { *m = RetryInfo{} }
+func (*RetryInfo) ProtoMessage() {}
+func (*RetryInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{0}
+}
+func (m *RetryInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RetryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RetryInfo.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RetryInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RetryInfo.Merge(m, src)
+}
+func (m *RetryInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *RetryInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_RetryInfo.DiscardUnknown(m)
+}
 
-func (m *RetryInfo) GetRetryDelay() *google_protobuf1.Duration {
+var xxx_messageInfo_RetryInfo proto.InternalMessageInfo
+
+func (m *RetryInfo) GetRetryDelay() *types.Duration {
 	if m != nil {
 		return m.RetryDelay
 	}
@@ -54,14 +92,45 @@
 // Describes additional debugging info.
 type DebugInfo struct {
 	// The stack trace entries indicating where the error occurred.
-	StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries" json:"stack_entries,omitempty"`
+	StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"`
 	// Additional debugging information provided by the server.
-	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+	Detail               string   `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *DebugInfo) Reset()                    { *m = DebugInfo{} }
-func (*DebugInfo) ProtoMessage()               {}
-func (*DebugInfo) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{1} }
+func (m *DebugInfo) Reset()      { *m = DebugInfo{} }
+func (*DebugInfo) ProtoMessage() {}
+func (*DebugInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{1}
+}
+func (m *DebugInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DebugInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DebugInfo.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DebugInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DebugInfo.Merge(m, src)
+}
+func (m *DebugInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *DebugInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_DebugInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DebugInfo proto.InternalMessageInfo
 
 func (m *DebugInfo) GetStackEntries() []string {
 	if m != nil {
@@ -94,12 +163,43 @@
 // quota failure.
 type QuotaFailure struct {
 	// Describes all quota violations.
-	Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations" json:"violations,omitempty"`
+	Violations           []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
 }
 
-func (m *QuotaFailure) Reset()                    { *m = QuotaFailure{} }
-func (*QuotaFailure) ProtoMessage()               {}
-func (*QuotaFailure) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{2} }
+func (m *QuotaFailure) Reset()      { *m = QuotaFailure{} }
+func (*QuotaFailure) ProtoMessage() {}
+func (*QuotaFailure) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{2}
+}
+func (m *QuotaFailure) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *QuotaFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_QuotaFailure.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *QuotaFailure) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QuotaFailure.Merge(m, src)
+}
+func (m *QuotaFailure) XXX_Size() int {
+	return m.Size()
+}
+func (m *QuotaFailure) XXX_DiscardUnknown() {
+	xxx_messageInfo_QuotaFailure.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuotaFailure proto.InternalMessageInfo
 
 func (m *QuotaFailure) GetViolations() []*QuotaFailure_Violation {
 	if m != nil {
@@ -126,14 +226,43 @@
 	//
 	// For example: "Service disabled" or "Daily Limit for read operations
 	// exceeded".
-	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	Description          string   `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *QuotaFailure_Violation) Reset()      { *m = QuotaFailure_Violation{} }
 func (*QuotaFailure_Violation) ProtoMessage() {}
 func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) {
-	return fileDescriptorErrorDetails, []int{2, 0}
+	return fileDescriptor_851816e4d6b6361a, []int{2, 0}
 }
+func (m *QuotaFailure_Violation) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *QuotaFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_QuotaFailure_Violation.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *QuotaFailure_Violation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QuotaFailure_Violation.Merge(m, src)
+}
+func (m *QuotaFailure_Violation) XXX_Size() int {
+	return m.Size()
+}
+func (m *QuotaFailure_Violation) XXX_DiscardUnknown() {
+	xxx_messageInfo_QuotaFailure_Violation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuotaFailure_Violation proto.InternalMessageInfo
 
 func (m *QuotaFailure_Violation) GetSubject() string {
 	if m != nil {
@@ -160,12 +289,43 @@
 // PreconditionFailure message.
 type PreconditionFailure struct {
 	// Describes all precondition violations.
-	Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations" json:"violations,omitempty"`
+	Violations           []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                         `json:"-"`
+	XXX_unrecognized     []byte                           `json:"-"`
+	XXX_sizecache        int32                            `json:"-"`
 }
 
-func (m *PreconditionFailure) Reset()                    { *m = PreconditionFailure{} }
-func (*PreconditionFailure) ProtoMessage()               {}
-func (*PreconditionFailure) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{3} }
+func (m *PreconditionFailure) Reset()      { *m = PreconditionFailure{} }
+func (*PreconditionFailure) ProtoMessage() {}
+func (*PreconditionFailure) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{3}
+}
+func (m *PreconditionFailure) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PreconditionFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PreconditionFailure.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PreconditionFailure) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PreconditionFailure.Merge(m, src)
+}
+func (m *PreconditionFailure) XXX_Size() int {
+	return m.Size()
+}
+func (m *PreconditionFailure) XXX_DiscardUnknown() {
+	xxx_messageInfo_PreconditionFailure.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PreconditionFailure proto.InternalMessageInfo
 
 func (m *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation {
 	if m != nil {
@@ -192,14 +352,43 @@
 	// description to understand how to fix the failure.
 	//
 	// For example: "Terms of service not accepted".
-	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	Description          string   `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *PreconditionFailure_Violation) Reset()      { *m = PreconditionFailure_Violation{} }
 func (*PreconditionFailure_Violation) ProtoMessage() {}
 func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) {
-	return fileDescriptorErrorDetails, []int{3, 0}
+	return fileDescriptor_851816e4d6b6361a, []int{3, 0}
 }
+func (m *PreconditionFailure_Violation) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PreconditionFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PreconditionFailure_Violation.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PreconditionFailure_Violation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PreconditionFailure_Violation.Merge(m, src)
+}
+func (m *PreconditionFailure_Violation) XXX_Size() int {
+	return m.Size()
+}
+func (m *PreconditionFailure_Violation) XXX_DiscardUnknown() {
+	xxx_messageInfo_PreconditionFailure_Violation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PreconditionFailure_Violation proto.InternalMessageInfo
 
 func (m *PreconditionFailure_Violation) GetType() string {
 	if m != nil {
@@ -230,12 +419,43 @@
 // syntactic aspects of the request.
 type BadRequest struct {
 	// Describes all violations in a client request.
-	FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations" json:"field_violations,omitempty"`
+	FieldViolations      []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
 }
 
-func (m *BadRequest) Reset()                    { *m = BadRequest{} }
-func (*BadRequest) ProtoMessage()               {}
-func (*BadRequest) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{4} }
+func (m *BadRequest) Reset()      { *m = BadRequest{} }
+func (*BadRequest) ProtoMessage() {}
+func (*BadRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{4}
+}
+func (m *BadRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BadRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *BadRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BadRequest.Merge(m, src)
+}
+func (m *BadRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *BadRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_BadRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BadRequest proto.InternalMessageInfo
 
 func (m *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation {
 	if m != nil {
@@ -255,14 +475,43 @@
 	// field. E.g., "field_violations.field" would identify this field.
 	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
 	// A description of why the request element is bad.
-	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	Description          string   `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *BadRequest_FieldViolation) Reset()      { *m = BadRequest_FieldViolation{} }
 func (*BadRequest_FieldViolation) ProtoMessage() {}
 func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) {
-	return fileDescriptorErrorDetails, []int{4, 0}
+	return fileDescriptor_851816e4d6b6361a, []int{4, 0}
 }
+func (m *BadRequest_FieldViolation) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BadRequest_FieldViolation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BadRequest_FieldViolation.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *BadRequest_FieldViolation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BadRequest_FieldViolation.Merge(m, src)
+}
+func (m *BadRequest_FieldViolation) XXX_Size() int {
+	return m.Size()
+}
+func (m *BadRequest_FieldViolation) XXX_DiscardUnknown() {
+	xxx_messageInfo_BadRequest_FieldViolation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BadRequest_FieldViolation proto.InternalMessageInfo
 
 func (m *BadRequest_FieldViolation) GetField() string {
 	if m != nil {
@@ -290,12 +539,43 @@
 	RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
 	// Any data that was used to serve this request. For example, an encrypted
 	// stack trace that can be sent back to the service provider for debugging.
-	ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"`
+	ServingData          string   `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *RequestInfo) Reset()                    { *m = RequestInfo{} }
-func (*RequestInfo) ProtoMessage()               {}
-func (*RequestInfo) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{5} }
+func (m *RequestInfo) Reset()      { *m = RequestInfo{} }
+func (*RequestInfo) ProtoMessage() {}
+func (*RequestInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{5}
+}
+func (m *RequestInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RequestInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RequestInfo.Merge(m, src)
+}
+func (m *RequestInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *RequestInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_RequestInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestInfo proto.InternalMessageInfo
 
 func (m *RequestInfo) GetRequestId() string {
 	if m != nil {
@@ -323,7 +603,8 @@
 	ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
 	// The name of the resource being accessed.  For example, a shared calendar
 	// name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
-	// error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+	// error is
+	// [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
 	ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
 	// The owner of the resource (optional).
 	// For example, "user:<owner email>" or "project:<Google developer project
@@ -332,12 +613,43 @@
 	// Describes what error is encountered when accessing this resource.
 	// For example, updating a cloud project may require the `writer` permission
 	// on the developer console project.
-	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+	Description          string   `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *ResourceInfo) Reset()                    { *m = ResourceInfo{} }
-func (*ResourceInfo) ProtoMessage()               {}
-func (*ResourceInfo) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{6} }
+func (m *ResourceInfo) Reset()      { *m = ResourceInfo{} }
+func (*ResourceInfo) ProtoMessage() {}
+func (*ResourceInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{6}
+}
+func (m *ResourceInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResourceInfo.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResourceInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceInfo.Merge(m, src)
+}
+func (m *ResourceInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceInfo proto.InternalMessageInfo
 
 func (m *ResourceInfo) GetResourceType() string {
 	if m != nil {
@@ -378,12 +690,43 @@
 // directly to the right place in the developer console to flip the bit.
 type Help struct {
 	// URL(s) pointing to additional information on handling the current error.
-	Links []*Help_Link `protobuf:"bytes,1,rep,name=links" json:"links,omitempty"`
+	Links                []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
 }
 
-func (m *Help) Reset()                    { *m = Help{} }
-func (*Help) ProtoMessage()               {}
-func (*Help) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{7} }
+func (m *Help) Reset()      { *m = Help{} }
+func (*Help) ProtoMessage() {}
+func (*Help) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{7}
+}
+func (m *Help) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Help) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Help.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Help) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Help.Merge(m, src)
+}
+func (m *Help) XXX_Size() int {
+	return m.Size()
+}
+func (m *Help) XXX_DiscardUnknown() {
+	xxx_messageInfo_Help.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Help proto.InternalMessageInfo
 
 func (m *Help) GetLinks() []*Help_Link {
 	if m != nil {
@@ -401,12 +744,43 @@
 	// Describes what the link offers.
 	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
 	// The URL of the link.
-	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	Url                  string   `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *Help_Link) Reset()                    { *m = Help_Link{} }
-func (*Help_Link) ProtoMessage()               {}
-func (*Help_Link) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{7, 0} }
+func (m *Help_Link) Reset()      { *m = Help_Link{} }
+func (*Help_Link) ProtoMessage() {}
+func (*Help_Link) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{7, 0}
+}
+func (m *Help_Link) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Help_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Help_Link.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Help_Link) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Help_Link.Merge(m, src)
+}
+func (m *Help_Link) XXX_Size() int {
+	return m.Size()
+}
+func (m *Help_Link) XXX_DiscardUnknown() {
+	xxx_messageInfo_Help_Link.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Help_Link proto.InternalMessageInfo
 
 func (m *Help_Link) GetDescription() string {
 	if m != nil {
@@ -434,12 +808,43 @@
 	// Examples are: "en-US", "fr-CH", "es-MX"
 	Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"`
 	// The localized error message in the above locale.
-	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	Message              string   `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
 }
 
-func (m *LocalizedMessage) Reset()                    { *m = LocalizedMessage{} }
-func (*LocalizedMessage) ProtoMessage()               {}
-func (*LocalizedMessage) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{8} }
+func (m *LocalizedMessage) Reset()      { *m = LocalizedMessage{} }
+func (*LocalizedMessage) ProtoMessage() {}
+func (*LocalizedMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_851816e4d6b6361a, []int{8}
+}
+func (m *LocalizedMessage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LocalizedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LocalizedMessage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LocalizedMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LocalizedMessage.Merge(m, src)
+}
+func (m *LocalizedMessage) XXX_Size() int {
+	return m.Size()
+}
+func (m *LocalizedMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_LocalizedMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LocalizedMessage proto.InternalMessageInfo
 
 func (m *LocalizedMessage) GetLocale() string {
 	if m != nil {
@@ -473,6 +878,52 @@
 	proto.RegisterType((*Help_Link)(nil), "google.rpc.Help.Link")
 	proto.RegisterType((*LocalizedMessage)(nil), "google.rpc.LocalizedMessage")
 }
+
+func init() { proto.RegisterFile("google/rpc/error_details.proto", fileDescriptor_851816e4d6b6361a) }
+
+var fileDescriptor_851816e4d6b6361a = []byte{
+	// 624 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xbf, 0x6f, 0xd3, 0x40,
+	0x18, 0xed, 0x35, 0x69, 0x91, 0xbf, 0x84, 0x52, 0xcc, 0x0f, 0x85, 0x48, 0x9c, 0x82, 0x11, 0x52,
+	0x11, 0x92, 0x2b, 0x95, 0xad, 0x63, 0x48, 0x7f, 0x49, 0x05, 0x82, 0x85, 0x18, 0x60, 0xb0, 0x2e,
+	0xf6, 0x97, 0xe8, 0xa8, 0xe3, 0x33, 0x67, 0xbb, 0xa8, 0x4c, 0xfc, 0x09, 0xec, 0x6c, 0x4c, 0xfd,
+	0x27, 0xd8, 0x3b, 0x76, 0x64, 0x24, 0xe9, 0xc2, 0xd8, 0x91, 0x11, 0x9d, 0x7d, 0xd7, 0xba, 0x4d,
+	0x41, 0x6c, 0x7e, 0xef, 0xde, 0x3d, 0xbf, 0xf7, 0xe9, 0xee, 0x80, 0x8e, 0x84, 0x18, 0x45, 0xb8,
+	0x2a, 0x93, 0x60, 0x15, 0xa5, 0x14, 0xd2, 0x0f, 0x31, 0x63, 0x3c, 0x4a, 0xdd, 0x44, 0x8a, 0x4c,
+	0xd8, 0x50, 0xae, 0xbb, 0x32, 0x09, 0xda, 0x46, 0x5b, 0xac, 0x0c, 0xf2, 0xe1, 0x6a, 0x98, 0x4b,
+	0x96, 0x71, 0x11, 0x97, 0x5a, 0x67, 0x0b, 0x2c, 0x0f, 0x33, 0x79, 0xb0, 0x13, 0x0f, 0x85, 0xbd,
+	0x0e, 0x0d, 0xa9, 0x80, 0x1f, 0x62, 0xc4, 0x0e, 0x5a, 0xa4, 0x43, 0x56, 0x1a, 0x6b, 0xf7, 0x5c,
+	0x6d, 0x67, 0x2c, 0xdc, 0x9e, 0xb6, 0xf0, 0xa0, 0x50, 0xf7, 0x94, 0xd8, 0xd9, 0x06, 0xab, 0x87,
+	0x83, 0x7c, 0x54, 0x18, 0x3d, 0x84, 0xeb, 0x69, 0xc6, 0x82, 0x3d, 0x1f, 0xe3, 0x4c, 0x72, 0x4c,
+	0x5b, 0xa4, 0x53, 0x5b, 0xb1, 0xbc, 0x66, 0x41, 0x6e, 0x94, 0x9c, 0x7d, 0x17, 0x16, 0xcb, 0xdc,
+	0xad, 0xf9, 0x0e, 0x59, 0xb1, 0x3c, 0x8d, 0x9c, 0xaf, 0x04, 0x9a, 0xaf, 0x72, 0x91, 0xb1, 0x4d,
+	0xc6, 0xa3, 0x5c, 0xa2, 0xdd, 0x05, 0xd8, 0xe7, 0x22, 0x2a, 0xfe, 0x59, 0x5a, 0x35, 0xd6, 0x1c,
+	0xf7, 0xbc, 0xa4, 0x5b, 0x55, 0xbb, 0x6f, 0x8c, 0xd4, 0xab, 0xec, 0x6a, 0x6f, 0x81, 0x75, 0xb6,
+	0x60, 0xb7, 0xe0, 0x5a, 0x9a, 0x0f, 0xde, 0x63, 0x90, 0x15, 0x1d, 0x2d, 0xcf, 0x40, 0xbb, 0x03,
+	0x8d, 0x10, 0xd3, 0x40, 0xf2, 0x44, 0x09, 0x75, 0xb0, 0x2a, 0xe5, 0x7c, 0x27, 0x70, 0xab, 0x2f,
+	0x31, 0x10, 0x71, 0xc8, 0x15, 0x61, 0x42, 0xee, 0x5c, 0x11, 0xf2, 0x71, 0x35, 0xe4, 0x15, 0x9b,
+	0xfe, 0x92, 0xf5, 0x5d, 0x35, 0xab, 0x0d, 0xf5, 0xec, 0x20, 0x41, 0x1d, 0xb4, 0xf8, 0xae, 0xe6,
+	0x9f, 0xff, 0x67, 0xfe, 0xda, 0x6c, 0xfe, 0x43, 0x02, 0xd0, 0x65, 0xa1, 0x87, 0x1f, 0x72, 0x4c,
+	0x33, 0xbb, 0x0f, 0xcb, 0x43, 0x8e, 0x51, 0xe8, 0xcf, 0x84, 0x7f, 0x54, 0x0d, 0x7f, 0xbe, 0xc3,
+	0xdd, 0x54, 0xf2, 0xf3, 0xe0, 0x37, 0x86, 0x17, 0x70, 0xda, 0xde, 0x86, 0xa5, 0x8b, 0x12, 0xfb,
+	0x36, 0x2c, 0x14, 0x22, 0xdd, 0xa1, 0x04, 0xff, 0x31, 0xea, 0x97, 0xd0, 0xd0, 0x3f, 0x2d, 0x0e,
+	0xd5, 0x7d, 0x00, 0x59, 0x42, 0x9f, 0x1b, 0x2f, 0x4b, 0x33, 0x3b, 0xa1, 0xfd, 0x00, 0x9a, 0x29,
+	0xca, 0x7d, 0x1e, 0x8f, 0xfc, 0x90, 0x65, 0xcc, 0x18, 0x6a, 0xae, 0xc7, 0x32, 0xe6, 0x7c, 0x21,
+	0xd0, 0xf4, 0x30, 0x15, 0xb9, 0x0c, 0xd0, 0x9c, 0x53, 0xa9, 0xb1, 0x5f, 0x99, 0x72, 0xd3, 0x90,
+	0xaf, 0xd5, 0xb4, 0xab, 0xa2, 0x98, 0x8d, 0x51, 0x3b, 0x9f, 0x89, 0x5e, 0xb0, 0x31, 0xaa, 0x8e,
+	0xe2, 0x63, 0x8c, 0x52, 0x8f, 0xbc, 0x04, 0x97, 0x3b, 0xd6, 0x67, 0x3b, 0x0a, 0xa8, 0x6f, 0x63,
+	0x94, 0xd8, 0x4f, 0x60, 0x21, 0xe2, 0xf1, 0x9e, 0x19, 0xfe, 0x9d, 0xea, 0xf0, 0x95, 0xc0, 0xdd,
+	0xe5, 0xf1, 0x9e, 0x57, 0x6a, 0xda, 0xeb, 0x50, 0x57, 0xf0, 0xb2, 0x3d, 0x99, 0xb1, 0xb7, 0x97,
+	0xa1, 0x96, 0x4b, 0x73, 0xc1, 0xd4, 0xa7, 0xd3, 0x83, 0xe5, 0x5d, 0x11, 0xb0, 0x88, 0x7f, 0xc2,
+	0xf0, 0x39, 0xa6, 0x29, 0x1b, 0xa1, 0xba, 0x89, 0x91, 0xe2, 0x4c, 0x7f, 0x8d, 0xd4, 0x39, 0x1b,
+	0x97, 0x12, 0x73, 0xce, 0x34, 0xec, 0x86, 0xc7, 0x13, 0x3a, 0xf7, 0x63, 0x42, 0xe7, 0x4e, 0x27,
+	0x94, 0xfc, 0x9e, 0x50, 0xf2, 0x79, 0x4a, 0xc9, 0xe1, 0x94, 0x92, 0xa3, 0x29, 0x25, 0xc7, 0x53,
+	0x4a, 0x7e, 0x4e, 0x29, 0xf9, 0x35, 0xa5, 0x73, 0xa7, 0x8a, 0x3f, 0xa1, 0xe4, 0xe8, 0x84, 0x12,
+	0x58, 0x0a, 0xc4, 0xb8, 0x52, 0xac, 0x7b, 0x73, 0x43, 0xbd, 0x5e, 0xbd, 0xf2, 0xf1, 0xea, 0xab,
+	0xe7, 0xa5, 0x4f, 0xde, 0xd6, 0x64, 0x12, 0x7c, 0x9b, 0xaf, 0x79, 0xfd, 0x67, 0x83, 0xc5, 0xe2,
+	0xc9, 0x79, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x63, 0xe4, 0x76, 0x26, 0xf1, 0x04, 0x00, 0x00,
+}
+
 func (this *RetryInfo) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -501,6 +952,9 @@
 	if c := this.RetryDelay.Compare(that1.RetryDelay); c != 0 {
 		return c
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *DebugInfo) Compare(that interface{}) int {
@@ -548,6 +1002,9 @@
 		}
 		return 1
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *QuotaFailure) Compare(that interface{}) int {
@@ -586,6 +1043,9 @@
 			return c
 		}
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *QuotaFailure_Violation) Compare(that interface{}) int {
@@ -625,6 +1085,9 @@
 		}
 		return 1
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *PreconditionFailure) Compare(that interface{}) int {
@@ -663,6 +1126,9 @@
 			return c
 		}
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *PreconditionFailure_Violation) Compare(that interface{}) int {
@@ -708,6 +1174,9 @@
 		}
 		return 1
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *BadRequest) Compare(that interface{}) int {
@@ -746,6 +1215,9 @@
 			return c
 		}
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *BadRequest_FieldViolation) Compare(that interface{}) int {
@@ -785,6 +1257,9 @@
 		}
 		return 1
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *RequestInfo) Compare(that interface{}) int {
@@ -824,6 +1299,9 @@
 		}
 		return 1
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *ResourceInfo) Compare(that interface{}) int {
@@ -875,6 +1353,9 @@
 		}
 		return 1
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *Help) Compare(that interface{}) int {
@@ -913,6 +1394,9 @@
 			return c
 		}
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *Help_Link) Compare(that interface{}) int {
@@ -952,6 +1436,9 @@
 		}
 		return 1
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *LocalizedMessage) Compare(that interface{}) int {
@@ -991,6 +1478,9 @@
 		}
 		return 1
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *RetryInfo) Equal(that interface{}) bool {
@@ -1015,6 +1505,9 @@
 	if !this.RetryDelay.Equal(that1.RetryDelay) {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *DebugInfo) Equal(that interface{}) bool {
@@ -1047,6 +1540,9 @@
 	if this.Detail != that1.Detail {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *QuotaFailure) Equal(that interface{}) bool {
@@ -1076,6 +1572,9 @@
 			return false
 		}
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *QuotaFailure_Violation) Equal(that interface{}) bool {
@@ -1103,6 +1602,9 @@
 	if this.Description != that1.Description {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *PreconditionFailure) Equal(that interface{}) bool {
@@ -1132,6 +1634,9 @@
 			return false
 		}
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *PreconditionFailure_Violation) Equal(that interface{}) bool {
@@ -1162,6 +1667,9 @@
 	if this.Description != that1.Description {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *BadRequest) Equal(that interface{}) bool {
@@ -1191,6 +1699,9 @@
 			return false
 		}
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *BadRequest_FieldViolation) Equal(that interface{}) bool {
@@ -1218,6 +1729,9 @@
 	if this.Description != that1.Description {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *RequestInfo) Equal(that interface{}) bool {
@@ -1245,6 +1759,9 @@
 	if this.ServingData != that1.ServingData {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *ResourceInfo) Equal(that interface{}) bool {
@@ -1278,6 +1795,9 @@
 	if this.Description != that1.Description {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *Help) Equal(that interface{}) bool {
@@ -1307,6 +1827,9 @@
 			return false
 		}
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *Help_Link) Equal(that interface{}) bool {
@@ -1334,6 +1857,9 @@
 	if this.Url != that1.Url {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *LocalizedMessage) Equal(that interface{}) bool {
@@ -1361,6 +1887,9 @@
 	if this.Message != that1.Message {
 		return false
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *RetryInfo) GoString() string {
@@ -1372,6 +1901,9 @@
 	if this.RetryDelay != nil {
 		s = append(s, "RetryDelay: "+fmt.Sprintf("%#v", this.RetryDelay)+",\n")
 	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1383,6 +1915,9 @@
 	s = append(s, "&rpc.DebugInfo{")
 	s = append(s, "StackEntries: "+fmt.Sprintf("%#v", this.StackEntries)+",\n")
 	s = append(s, "Detail: "+fmt.Sprintf("%#v", this.Detail)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1395,6 +1930,9 @@
 	if this.Violations != nil {
 		s = append(s, "Violations: "+fmt.Sprintf("%#v", this.Violations)+",\n")
 	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1406,6 +1944,9 @@
 	s = append(s, "&rpc.QuotaFailure_Violation{")
 	s = append(s, "Subject: "+fmt.Sprintf("%#v", this.Subject)+",\n")
 	s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1418,6 +1959,9 @@
 	if this.Violations != nil {
 		s = append(s, "Violations: "+fmt.Sprintf("%#v", this.Violations)+",\n")
 	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1430,6 +1974,9 @@
 	s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
 	s = append(s, "Subject: "+fmt.Sprintf("%#v", this.Subject)+",\n")
 	s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1442,6 +1989,9 @@
 	if this.FieldViolations != nil {
 		s = append(s, "FieldViolations: "+fmt.Sprintf("%#v", this.FieldViolations)+",\n")
 	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1453,6 +2003,9 @@
 	s = append(s, "&rpc.BadRequest_FieldViolation{")
 	s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
 	s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1464,6 +2017,9 @@
 	s = append(s, "&rpc.RequestInfo{")
 	s = append(s, "RequestId: "+fmt.Sprintf("%#v", this.RequestId)+",\n")
 	s = append(s, "ServingData: "+fmt.Sprintf("%#v", this.ServingData)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1477,6 +2033,9 @@
 	s = append(s, "ResourceName: "+fmt.Sprintf("%#v", this.ResourceName)+",\n")
 	s = append(s, "Owner: "+fmt.Sprintf("%#v", this.Owner)+",\n")
 	s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1489,6 +2048,9 @@
 	if this.Links != nil {
 		s = append(s, "Links: "+fmt.Sprintf("%#v", this.Links)+",\n")
 	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1500,6 +2062,9 @@
 	s = append(s, "&rpc.Help_Link{")
 	s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n")
 	s = append(s, "Url: "+fmt.Sprintf("%#v", this.Url)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1511,6 +2076,9 @@
 	s = append(s, "&rpc.LocalizedMessage{")
 	s = append(s, "Locale: "+fmt.Sprintf("%#v", this.Locale)+",\n")
 	s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -1547,6 +2115,9 @@
 		}
 		i += n1
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1586,6 +2157,9 @@
 		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Detail)))
 		i += copy(dAtA[i:], m.Detail)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1616,6 +2190,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1646,6 +2223,9 @@
 		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
 		i += copy(dAtA[i:], m.Description)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1676,6 +2256,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1712,6 +2295,9 @@
 		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
 		i += copy(dAtA[i:], m.Description)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1742,6 +2328,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1772,6 +2361,9 @@
 		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
 		i += copy(dAtA[i:], m.Description)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1802,6 +2394,9 @@
 		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ServingData)))
 		i += copy(dAtA[i:], m.ServingData)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1844,6 +2439,9 @@
 		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
 		i += copy(dAtA[i:], m.Description)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1874,6 +2472,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1904,6 +2505,9 @@
 		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Url)))
 		i += copy(dAtA[i:], m.Url)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1934,6 +2538,9 @@
 		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Message)))
 		i += copy(dAtA[i:], m.Message)
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -1949,9 +2556,10 @@
 func NewPopulatedRetryInfo(r randyErrorDetails, easy bool) *RetryInfo {
 	this := &RetryInfo{}
 	if r.Intn(10) != 0 {
-		this.RetryDelay = google_protobuf1.NewPopulatedDuration(r, easy)
+		this.RetryDelay = types.NewPopulatedDuration(r, easy)
 	}
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2)
 	}
 	return this
 }
@@ -1965,6 +2573,7 @@
 	}
 	this.Detail = string(randStringErrorDetails(r))
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3)
 	}
 	return this
 }
@@ -1979,6 +2588,7 @@
 		}
 	}
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2)
 	}
 	return this
 }
@@ -1988,6 +2598,7 @@
 	this.Subject = string(randStringErrorDetails(r))
 	this.Description = string(randStringErrorDetails(r))
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3)
 	}
 	return this
 }
@@ -2002,6 +2613,7 @@
 		}
 	}
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2)
 	}
 	return this
 }
@@ -2012,6 +2624,7 @@
 	this.Subject = string(randStringErrorDetails(r))
 	this.Description = string(randStringErrorDetails(r))
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 4)
 	}
 	return this
 }
@@ -2026,6 +2639,7 @@
 		}
 	}
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2)
 	}
 	return this
 }
@@ -2035,6 +2649,7 @@
 	this.Field = string(randStringErrorDetails(r))
 	this.Description = string(randStringErrorDetails(r))
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3)
 	}
 	return this
 }
@@ -2044,6 +2659,7 @@
 	this.RequestId = string(randStringErrorDetails(r))
 	this.ServingData = string(randStringErrorDetails(r))
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3)
 	}
 	return this
 }
@@ -2055,6 +2671,7 @@
 	this.Owner = string(randStringErrorDetails(r))
 	this.Description = string(randStringErrorDetails(r))
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 5)
 	}
 	return this
 }
@@ -2069,6 +2686,7 @@
 		}
 	}
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2)
 	}
 	return this
 }
@@ -2078,6 +2696,7 @@
 	this.Description = string(randStringErrorDetails(r))
 	this.Url = string(randStringErrorDetails(r))
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3)
 	}
 	return this
 }
@@ -2087,6 +2706,7 @@
 	this.Locale = string(randStringErrorDetails(r))
 	this.Message = string(randStringErrorDetails(r))
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3)
 	}
 	return this
 }
@@ -2164,16 +2784,25 @@
 	return dAtA
 }
 func (m *RetryInfo) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.RetryDelay != nil {
 		l = m.RetryDelay.Size()
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *DebugInfo) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.StackEntries) > 0 {
@@ -2186,10 +2815,16 @@
 	if l > 0 {
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *QuotaFailure) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Violations) > 0 {
@@ -2198,10 +2833,16 @@
 			n += 1 + l + sovErrorDetails(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *QuotaFailure_Violation) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Subject)
@@ -2212,10 +2853,16 @@
 	if l > 0 {
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *PreconditionFailure) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Violations) > 0 {
@@ -2224,10 +2871,16 @@
 			n += 1 + l + sovErrorDetails(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *PreconditionFailure_Violation) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Type)
@@ -2242,10 +2895,16 @@
 	if l > 0 {
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *BadRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.FieldViolations) > 0 {
@@ -2254,10 +2913,16 @@
 			n += 1 + l + sovErrorDetails(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *BadRequest_FieldViolation) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Field)
@@ -2268,10 +2933,16 @@
 	if l > 0 {
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *RequestInfo) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.RequestId)
@@ -2282,10 +2953,16 @@
 	if l > 0 {
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *ResourceInfo) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.ResourceType)
@@ -2304,10 +2981,16 @@
 	if l > 0 {
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *Help) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if len(m.Links) > 0 {
@@ -2316,10 +2999,16 @@
 			n += 1 + l + sovErrorDetails(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *Help_Link) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Description)
@@ -2330,10 +3019,16 @@
 	if l > 0 {
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
 func (m *LocalizedMessage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	l = len(m.Locale)
@@ -2344,6 +3039,9 @@
 	if l > 0 {
 		n += 1 + l + sovErrorDetails(uint64(l))
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -2365,7 +3063,8 @@
 		return "nil"
 	}
 	s := strings.Join([]string{`&RetryInfo{`,
-		`RetryDelay:` + strings.Replace(fmt.Sprintf("%v", this.RetryDelay), "Duration", "google_protobuf1.Duration", 1) + `,`,
+		`RetryDelay:` + strings.Replace(fmt.Sprintf("%v", this.RetryDelay), "Duration", "types.Duration", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2377,6 +3076,7 @@
 	s := strings.Join([]string{`&DebugInfo{`,
 		`StackEntries:` + fmt.Sprintf("%v", this.StackEntries) + `,`,
 		`Detail:` + fmt.Sprintf("%v", this.Detail) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2387,6 +3087,7 @@
 	}
 	s := strings.Join([]string{`&QuotaFailure{`,
 		`Violations:` + strings.Replace(fmt.Sprintf("%v", this.Violations), "QuotaFailure_Violation", "QuotaFailure_Violation", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2398,6 +3099,7 @@
 	s := strings.Join([]string{`&QuotaFailure_Violation{`,
 		`Subject:` + fmt.Sprintf("%v", this.Subject) + `,`,
 		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2408,6 +3110,7 @@
 	}
 	s := strings.Join([]string{`&PreconditionFailure{`,
 		`Violations:` + strings.Replace(fmt.Sprintf("%v", this.Violations), "PreconditionFailure_Violation", "PreconditionFailure_Violation", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2420,6 +3123,7 @@
 		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
 		`Subject:` + fmt.Sprintf("%v", this.Subject) + `,`,
 		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2430,6 +3134,7 @@
 	}
 	s := strings.Join([]string{`&BadRequest{`,
 		`FieldViolations:` + strings.Replace(fmt.Sprintf("%v", this.FieldViolations), "BadRequest_FieldViolation", "BadRequest_FieldViolation", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2441,6 +3146,7 @@
 	s := strings.Join([]string{`&BadRequest_FieldViolation{`,
 		`Field:` + fmt.Sprintf("%v", this.Field) + `,`,
 		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2452,6 +3158,7 @@
 	s := strings.Join([]string{`&RequestInfo{`,
 		`RequestId:` + fmt.Sprintf("%v", this.RequestId) + `,`,
 		`ServingData:` + fmt.Sprintf("%v", this.ServingData) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2465,6 +3172,7 @@
 		`ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
 		`Owner:` + fmt.Sprintf("%v", this.Owner) + `,`,
 		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2475,6 +3183,7 @@
 	}
 	s := strings.Join([]string{`&Help{`,
 		`Links:` + strings.Replace(fmt.Sprintf("%v", this.Links), "Help_Link", "Help_Link", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2486,6 +3195,7 @@
 	s := strings.Join([]string{`&Help_Link{`,
 		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
 		`Url:` + fmt.Sprintf("%v", this.Url) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2497,6 +3207,7 @@
 	s := strings.Join([]string{`&LocalizedMessage{`,
 		`Locale:` + fmt.Sprintf("%v", this.Locale) + `,`,
 		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2524,7 +3235,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2552,7 +3263,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2561,11 +3272,14 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
 			if m.RetryDelay == nil {
-				m.RetryDelay = &google_protobuf1.Duration{}
+				m.RetryDelay = &types.Duration{}
 			}
 			if err := m.RetryDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -2580,9 +3294,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2607,7 +3325,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2635,7 +3353,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2645,6 +3363,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2664,7 +3385,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2674,6 +3395,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2688,9 +3412,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2715,7 +3443,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2743,7 +3471,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2752,6 +3480,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2769,9 +3500,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2796,7 +3531,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2824,7 +3559,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2834,6 +3569,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2853,7 +3591,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2863,6 +3601,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2877,9 +3618,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2904,7 +3649,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2932,7 +3677,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2941,6 +3686,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2958,9 +3706,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -2985,7 +3737,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3013,7 +3765,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3023,6 +3775,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3042,7 +3797,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3052,6 +3807,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3071,7 +3829,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3081,6 +3839,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3095,9 +3856,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3122,7 +3887,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3150,7 +3915,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3159,6 +3924,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3176,9 +3944,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3203,7 +3975,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3231,7 +4003,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3241,6 +4013,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3260,7 +4035,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3270,6 +4045,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3284,9 +4062,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3311,7 +4093,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3339,7 +4121,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3349,6 +4131,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3368,7 +4153,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3378,6 +4163,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3392,9 +4180,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3419,7 +4211,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3447,7 +4239,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3457,6 +4249,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3476,7 +4271,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3486,6 +4281,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3505,7 +4303,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3515,6 +4313,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3534,7 +4335,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3544,6 +4345,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3558,9 +4362,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3585,7 +4393,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3613,7 +4421,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3622,6 +4430,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3639,9 +4450,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3666,7 +4481,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3694,7 +4509,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3704,6 +4519,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3723,7 +4541,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3733,6 +4551,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3747,9 +4568,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3774,7 +4599,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3802,7 +4627,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3812,6 +4637,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3831,7 +4659,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3841,6 +4669,9 @@
 				return ErrInvalidLengthErrorDetails
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3855,9 +4686,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthErrorDetails
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -3921,10 +4756,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthErrorDetails
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthErrorDetails
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -3953,6 +4791,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthErrorDetails
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -3971,48 +4812,3 @@
 	ErrInvalidLengthErrorDetails = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowErrorDetails   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("google/rpc/error_details.proto", fileDescriptorErrorDetails) }
-
-var fileDescriptorErrorDetails = []byte{
-	// 624 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xbf, 0x6f, 0xd3, 0x40,
-	0x18, 0xed, 0x35, 0x69, 0x91, 0xbf, 0x84, 0x52, 0xcc, 0x0f, 0x85, 0x48, 0x9c, 0x82, 0x11, 0x52,
-	0x11, 0x92, 0x2b, 0x95, 0xad, 0x63, 0x48, 0x7f, 0x49, 0x05, 0x82, 0x85, 0x18, 0x60, 0xb0, 0x2e,
-	0xf6, 0x97, 0xe8, 0xa8, 0xe3, 0x33, 0x67, 0xbb, 0xa8, 0x4c, 0xfc, 0x09, 0xec, 0x6c, 0x4c, 0xfd,
-	0x27, 0xd8, 0x3b, 0x76, 0x64, 0x24, 0xe9, 0xc2, 0xd8, 0x91, 0x11, 0x9d, 0x7d, 0xd7, 0xba, 0x4d,
-	0x41, 0x6c, 0x7e, 0xef, 0xde, 0x3d, 0xbf, 0xf7, 0xe9, 0xee, 0x80, 0x8e, 0x84, 0x18, 0x45, 0xb8,
-	0x2a, 0x93, 0x60, 0x15, 0xa5, 0x14, 0xd2, 0x0f, 0x31, 0x63, 0x3c, 0x4a, 0xdd, 0x44, 0x8a, 0x4c,
-	0xd8, 0x50, 0xae, 0xbb, 0x32, 0x09, 0xda, 0x46, 0x5b, 0xac, 0x0c, 0xf2, 0xe1, 0x6a, 0x98, 0x4b,
-	0x96, 0x71, 0x11, 0x97, 0x5a, 0x67, 0x0b, 0x2c, 0x0f, 0x33, 0x79, 0xb0, 0x13, 0x0f, 0x85, 0xbd,
-	0x0e, 0x0d, 0xa9, 0x80, 0x1f, 0x62, 0xc4, 0x0e, 0x5a, 0xa4, 0x43, 0x56, 0x1a, 0x6b, 0xf7, 0x5c,
-	0x6d, 0x67, 0x2c, 0xdc, 0x9e, 0xb6, 0xf0, 0xa0, 0x50, 0xf7, 0x94, 0xd8, 0xd9, 0x06, 0xab, 0x87,
-	0x83, 0x7c, 0x54, 0x18, 0x3d, 0x84, 0xeb, 0x69, 0xc6, 0x82, 0x3d, 0x1f, 0xe3, 0x4c, 0x72, 0x4c,
-	0x5b, 0xa4, 0x53, 0x5b, 0xb1, 0xbc, 0x66, 0x41, 0x6e, 0x94, 0x9c, 0x7d, 0x17, 0x16, 0xcb, 0xdc,
-	0xad, 0xf9, 0x0e, 0x59, 0xb1, 0x3c, 0x8d, 0x9c, 0xaf, 0x04, 0x9a, 0xaf, 0x72, 0x91, 0xb1, 0x4d,
-	0xc6, 0xa3, 0x5c, 0xa2, 0xdd, 0x05, 0xd8, 0xe7, 0x22, 0x2a, 0xfe, 0x59, 0x5a, 0x35, 0xd6, 0x1c,
-	0xf7, 0xbc, 0xa4, 0x5b, 0x55, 0xbb, 0x6f, 0x8c, 0xd4, 0xab, 0xec, 0x6a, 0x6f, 0x81, 0x75, 0xb6,
-	0x60, 0xb7, 0xe0, 0x5a, 0x9a, 0x0f, 0xde, 0x63, 0x90, 0x15, 0x1d, 0x2d, 0xcf, 0x40, 0xbb, 0x03,
-	0x8d, 0x10, 0xd3, 0x40, 0xf2, 0x44, 0x09, 0x75, 0xb0, 0x2a, 0xe5, 0x7c, 0x27, 0x70, 0xab, 0x2f,
-	0x31, 0x10, 0x71, 0xc8, 0x15, 0x61, 0x42, 0xee, 0x5c, 0x11, 0xf2, 0x71, 0x35, 0xe4, 0x15, 0x9b,
-	0xfe, 0x92, 0xf5, 0x5d, 0x35, 0xab, 0x0d, 0xf5, 0xec, 0x20, 0x41, 0x1d, 0xb4, 0xf8, 0xae, 0xe6,
-	0x9f, 0xff, 0x67, 0xfe, 0xda, 0x6c, 0xfe, 0x43, 0x02, 0xd0, 0x65, 0xa1, 0x87, 0x1f, 0x72, 0x4c,
-	0x33, 0xbb, 0x0f, 0xcb, 0x43, 0x8e, 0x51, 0xe8, 0xcf, 0x84, 0x7f, 0x54, 0x0d, 0x7f, 0xbe, 0xc3,
-	0xdd, 0x54, 0xf2, 0xf3, 0xe0, 0x37, 0x86, 0x17, 0x70, 0xda, 0xde, 0x86, 0xa5, 0x8b, 0x12, 0xfb,
-	0x36, 0x2c, 0x14, 0x22, 0xdd, 0xa1, 0x04, 0xff, 0x31, 0xea, 0x97, 0xd0, 0xd0, 0x3f, 0x2d, 0x0e,
-	0xd5, 0x7d, 0x00, 0x59, 0x42, 0x9f, 0x1b, 0x2f, 0x4b, 0x33, 0x3b, 0xa1, 0xfd, 0x00, 0x9a, 0x29,
-	0xca, 0x7d, 0x1e, 0x8f, 0xfc, 0x90, 0x65, 0xcc, 0x18, 0x6a, 0xae, 0xc7, 0x32, 0xe6, 0x7c, 0x21,
-	0xd0, 0xf4, 0x30, 0x15, 0xb9, 0x0c, 0xd0, 0x9c, 0x53, 0xa9, 0xb1, 0x5f, 0x99, 0x72, 0xd3, 0x90,
-	0xaf, 0xd5, 0xb4, 0xab, 0xa2, 0x98, 0x8d, 0x51, 0x3b, 0x9f, 0x89, 0x5e, 0xb0, 0x31, 0xaa, 0x8e,
-	0xe2, 0x63, 0x8c, 0x52, 0x8f, 0xbc, 0x04, 0x97, 0x3b, 0xd6, 0x67, 0x3b, 0x0a, 0xa8, 0x6f, 0x63,
-	0x94, 0xd8, 0x4f, 0x60, 0x21, 0xe2, 0xf1, 0x9e, 0x19, 0xfe, 0x9d, 0xea, 0xf0, 0x95, 0xc0, 0xdd,
-	0xe5, 0xf1, 0x9e, 0x57, 0x6a, 0xda, 0xeb, 0x50, 0x57, 0xf0, 0xb2, 0x3d, 0x99, 0xb1, 0xb7, 0x97,
-	0xa1, 0x96, 0x4b, 0x73, 0xc1, 0xd4, 0xa7, 0xd3, 0x83, 0xe5, 0x5d, 0x11, 0xb0, 0x88, 0x7f, 0xc2,
-	0xf0, 0x39, 0xa6, 0x29, 0x1b, 0xa1, 0xba, 0x89, 0x91, 0xe2, 0x4c, 0x7f, 0x8d, 0xd4, 0x39, 0x1b,
-	0x97, 0x12, 0x73, 0xce, 0x34, 0xec, 0x86, 0xc7, 0x13, 0x3a, 0xf7, 0x63, 0x42, 0xe7, 0x4e, 0x27,
-	0x94, 0xfc, 0x9e, 0x50, 0xf2, 0x79, 0x4a, 0xc9, 0xe1, 0x94, 0x92, 0xa3, 0x29, 0x25, 0xc7, 0x53,
-	0x4a, 0x7e, 0x4e, 0x29, 0xf9, 0x35, 0xa5, 0x73, 0xa7, 0x8a, 0x3f, 0xa1, 0xe4, 0xe8, 0x84, 0x12,
-	0x58, 0x0a, 0xc4, 0xb8, 0x52, 0xac, 0x7b, 0x73, 0x43, 0xbd, 0x5e, 0xbd, 0xf2, 0xf1, 0xea, 0xab,
-	0xe7, 0xa5, 0x4f, 0xde, 0xd6, 0x64, 0x12, 0x7c, 0x9b, 0xaf, 0x79, 0xfd, 0x67, 0x83, 0xc5, 0xe2,
-	0xc9, 0x79, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x63, 0xe4, 0x76, 0x26, 0xf1, 0x04, 0x00, 0x00,
-}
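
The regenerated Unmarshal methods above all pick up the same three changes: the varint accumulation is written as `uint64(b&0x7F) << shift` (mask the byte before widening; behaviourally identical, just tidier), a `postIndex < 0` / `iNdEx < 0` guard rejects length prefixes whose end offset would overflow a signed int, and the bytes of skipped fields are now kept in `m.XXX_unrecognized` instead of being dropped. The standalone sketch below is illustrative only (the function and names are invented; it is not the generated code) and shows the decode-and-guard pattern in isolation.

```go
// Minimal sketch of the guard pattern used by the regenerated Unmarshal
// methods: read a varint length prefix, then refuse any length whose end
// index would overflow into a negative int before slicing.
package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")

func decodeString(data []byte, idx int) (string, int, error) {
	var strLen uint64
	for shift := uint(0); ; shift += 7 {
		if idx >= len(data) {
			return "", 0, io.ErrUnexpectedEOF
		}
		b := data[idx]
		idx++
		// Same shape as the regenerated code: mask first, then widen and shift.
		strLen |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	intStrLen := int(strLen)
	if intStrLen < 0 {
		return "", 0, errInvalidLength
	}
	end := idx + intStrLen
	if end < 0 { // the overflow guard added by the newer generator
		return "", 0, errInvalidLength
	}
	if end > len(data) {
		return "", 0, io.ErrUnexpectedEOF
	}
	return string(data[idx:end]), end, nil
}

func main() {
	data := []byte{0x05, 'h', 'e', 'l', 'l', 'o'} // length 5, then "hello"
	s, next, err := decodeString(data, 0)
	fmt.Println(s, next, err) // hello 6 <nil>
}
```

Without the `end < 0` check, an extreme length prefix could wrap the end index negative, slip past the `end > len(data)` test, and panic on the slice expression; the guard turns that case into the `ErrInvalidLength...` error seen throughout the hunks above.
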
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto b/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto
index a62078b..0682cc9 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto
+++ b/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto
@@ -24,7 +24,6 @@
 option java_package = "com.google.rpc";
 option objc_class_prefix = "RPC";
 
-
 // Describes when the clients can retry a failed request. Clients could ignore
 // the recommendation here or retry when this information is missing from error
 // responses.
@@ -154,7 +153,8 @@
 
   // The name of the resource being accessed.  For example, a shared calendar
   // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
-  // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+  // error is
+  // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
   string resource_name = 2;
 
   // The owner of the resource (optional).
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go
index 2d12390..65fc30f 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go
+++ b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go
@@ -1,37 +1,18 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: google/rpc/status.proto
 
-/*
-	Package rpc is a generated protocol buffer package.
-
-	It is generated from these files:
-		google/rpc/status.proto
-		google/rpc/error_details.proto
-		google/rpc/code.proto
-
-	It has these top-level messages:
-		Status
-		RetryInfo
-		DebugInfo
-		QuotaFailure
-		PreconditionFailure
-		BadRequest
-		RequestInfo
-		ResourceInfo
-		Help
-		LocalizedMessage
-*/
 package rpc
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/gogo/protobuf/types"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	types "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -44,24 +25,25 @@
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
-// The `Status` type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs. It is used by
-// [gRPC](https://github.com/grpc). The error model is designed to be:
+// The `Status` type defines a logical error model that is suitable for
+// different programming environments, including REST APIs and RPC APIs. It is
+// used by [gRPC](https://github.com/grpc). The error model is designed to be:
 //
 // - Simple to use and understand for most users
 // - Flexible enough to meet unexpected needs
 //
 // # Overview
 //
-// The `Status` message contains three pieces of data: error code, error message,
-// and error details. The error code should be an enum value of
-// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed.  The
-// error message should be a developer-facing English message that helps
-// developers *understand* and *resolve* the error. If a localized user-facing
-// error message is needed, put the localized message in the error details or
-// localize it in the client. The optional error details may contain arbitrary
-// information about the error. There is a predefined set of error detail types
-// in the package `google.rpc` that can be used for common error conditions.
+// The `Status` message contains three pieces of data: error code, error
+// message, and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes
+// if needed.  The error message should be a developer-facing English message
+// that helps developers *understand* and *resolve* the error. If a localized
+// user-facing error message is needed, put the localized message in the error
+// details or localize it in the client. The optional error details may contain
+// arbitrary information about the error. There is a predefined set of error
+// detail types in the package `google.rpc` that can be used for common error
+// conditions.
 //
 // # Language mapping
 //
@@ -97,20 +79,53 @@
 // - Logging. If some API errors are stored in logs, the message `Status` could
 //     be used directly after any stripping needed for security/privacy reasons.
 type Status struct {
-	// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+	// The status code, which should be an enum value of
+	// [google.rpc.Code][google.rpc.Code].
 	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
 	// A developer-facing error message, which should be in English. Any
 	// user-facing error message should be localized and sent in the
-	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
+	// by the client.
 	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
 	// A list of messages that carry the error details.  There is a common set of
 	// message types for APIs to use.
-	Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
+	Details              []*types.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
 }
 
-func (m *Status) Reset()                    { *m = Status{} }
-func (*Status) ProtoMessage()               {}
-func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorStatus, []int{0} }
+func (m *Status) Reset()      { *m = Status{} }
+func (*Status) ProtoMessage() {}
+func (*Status) Descriptor() ([]byte, []int) {
+	return fileDescriptor_24d244abaf643bfe, []int{0}
+}
+func (m *Status) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Status.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Status) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Status.Merge(m, src)
+}
+func (m *Status) XXX_Size() int {
+	return m.Size()
+}
+func (m *Status) XXX_DiscardUnknown() {
+	xxx_messageInfo_Status.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Status proto.InternalMessageInfo
 
 func (m *Status) GetCode() int32 {
 	if m != nil {
@@ -126,7 +141,7 @@
 	return ""
 }
 
-func (m *Status) GetDetails() []*google_protobuf.Any {
+func (m *Status) GetDetails() []*types.Any {
 	if m != nil {
 		return m.Details
 	}
@@ -139,6 +154,28 @@
 func init() {
 	proto.RegisterType((*Status)(nil), "google.rpc.Status")
 }
+
+func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) }
+
+var fileDescriptor_24d244abaf643bfe = []byte{
+	// 235 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
+	0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
+	0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
+	0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
+	0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
+	0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
+	0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
+	0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0xb8, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8,
+	0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31,
+	0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9,
+	0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x8b, 0x2f, 0x39, 0x3f,
+	0x57, 0x0f, 0xe1, 0x11, 0x27, 0x6e, 0x88, 0x5b, 0x03, 0x40, 0x56, 0x04, 0x30, 0x46, 0x31, 0x17,
+	0x15, 0x24, 0x2f, 0x62, 0x62, 0x0e, 0x0a, 0x70, 0x4e, 0x62, 0x03, 0x5b, 0x6b, 0x0c, 0x08, 0x00,
+	0x00, 0xff, 0xff, 0xaa, 0x06, 0xa1, 0xaa, 0x10, 0x01, 0x00, 0x00,
+}
+
 func (this *Status) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -187,6 +224,9 @@
 			return c
 		}
 	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
 	return 0
 }
 func (this *Status) Equal(that interface{}) bool {
@@ -222,6 +262,9 @@
 			return false
 		}
 	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
 	return true
 }
 func (this *Status) GoString() string {
@@ -235,6 +278,9 @@
 	if this.Details != nil {
 		s = append(s, "Details: "+fmt.Sprintf("%#v", this.Details)+",\n")
 	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -284,6 +330,9 @@
 			i += n
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		i += copy(dAtA[i:], m.XXX_unrecognized)
+	}
 	return i, nil
 }
 
@@ -305,12 +354,13 @@
 	this.Message = string(randStringStatus(r))
 	if r.Intn(10) != 0 {
 		v1 := r.Intn(5)
-		this.Details = make([]*google_protobuf.Any, v1)
+		this.Details = make([]*types.Any, v1)
 		for i := 0; i < v1; i++ {
-			this.Details[i] = google_protobuf.NewPopulatedAny(r, easy)
+			this.Details[i] = types.NewPopulatedAny(r, easy)
 		}
 	}
 	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedStatus(r, 4)
 	}
 	return this
 }
@@ -388,6 +438,9 @@
 	return dAtA
 }
 func (m *Status) Size() (n int) {
+	if m == nil {
+		return 0
+	}
 	var l int
 	_ = l
 	if m.Code != 0 {
@@ -403,6 +456,9 @@
 			n += 1 + l + sovStatus(uint64(l))
 		}
 	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
 	return n
 }
 
@@ -426,7 +482,8 @@
 	s := strings.Join([]string{`&Status{`,
 		`Code:` + fmt.Sprintf("%v", this.Code) + `,`,
 		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
-		`Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "Any", "google_protobuf.Any", 1) + `,`,
+		`Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "Any", "types.Any", 1) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -454,7 +511,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -482,7 +539,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Code |= (int32(b) & 0x7F) << shift
+				m.Code |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -501,7 +558,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -511,6 +568,9 @@
 				return ErrInvalidLengthStatus
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthStatus
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -530,7 +590,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -539,10 +599,13 @@
 				return ErrInvalidLengthStatus
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthStatus
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Details = append(m.Details, &google_protobuf.Any{})
+			m.Details = append(m.Details, &types.Any{})
 			if err := m.Details[len(m.Details)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -556,9 +619,13 @@
 			if skippy < 0 {
 				return ErrInvalidLengthStatus
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthStatus
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
 			iNdEx += skippy
 		}
 	}
@@ -622,10 +689,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthStatus
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthStatus
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -654,6 +724,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthStatus
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -672,24 +745,3 @@
 	ErrInvalidLengthStatus = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowStatus   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptorStatus) }
-
-var fileDescriptorStatus = []byte{
-	// 235 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
-	0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
-	0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
-	0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
-	0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
-	0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
-	0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
-	0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0xb8, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8,
-	0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31,
-	0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9,
-	0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x8b, 0x2f, 0x39, 0x3f,
-	0x57, 0x0f, 0xe1, 0x11, 0x27, 0x6e, 0x88, 0x5b, 0x03, 0x40, 0x56, 0x04, 0x30, 0x46, 0x31, 0x17,
-	0x15, 0x24, 0x2f, 0x62, 0x62, 0x0e, 0x0a, 0x70, 0x4e, 0x62, 0x03, 0x5b, 0x6b, 0x0c, 0x08, 0x00,
-	0x00, 0xff, 0xff, 0xaa, 0x06, 0xa1, 0xaa, 0x10, 0x01, 0x00, 0x00,
-}
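
After the regeneration, `Status.Details` is typed as `[]*types.Any` (the gogo `types` package rather than the old `google_protobuf` alias), and the message carries the `XXX_unrecognized`/`XXX_sizecache` bookkeeping fields that `Compare`, `Equal`, `Size`, and `Unmarshal` now account for. A hedged usage sketch against the vendored packages follows; the `DebugInfo` detail payload and the literal code value are example choices, not anything prescribed by this change.

```go
// Sketch: build a google.rpc.Status whose Details slice uses the new
// []*types.Any field type, packing an example DebugInfo message.
package main

import (
	"fmt"

	rpc "github.com/gogo/googleapis/google/rpc"
	"github.com/gogo/protobuf/types"
)

func main() {
	// types.MarshalAny wraps an arbitrary proto.Message in an Any.
	detail, err := types.MarshalAny(&rpc.DebugInfo{Detail: "example detail"})
	if err != nil {
		panic(err)
	}
	st := &rpc.Status{
		Code:    5, // google.rpc.Code NOT_FOUND, as an example
		Message: "layer not found",
		Details: []*types.Any{detail},
	}
	fmt.Println(st.String())
}
```
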
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.proto b/vendor/github.com/gogo/googleapis/google/rpc/status.proto
index db3226e..abcd453 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/status.proto
+++ b/vendor/github.com/gogo/googleapis/google/rpc/status.proto
@@ -24,25 +24,25 @@
 option java_package = "com.google.rpc";
 option objc_class_prefix = "RPC";
 
-
-// The `Status` type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs. It is used by
-// [gRPC](https://github.com/grpc). The error model is designed to be:
+// The `Status` type defines a logical error model that is suitable for
+// different programming environments, including REST APIs and RPC APIs. It is
+// used by [gRPC](https://github.com/grpc). The error model is designed to be:
 //
 // - Simple to use and understand for most users
 // - Flexible enough to meet unexpected needs
 //
 // # Overview
 //
-// The `Status` message contains three pieces of data: error code, error message,
-// and error details. The error code should be an enum value of
-// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed.  The
-// error message should be a developer-facing English message that helps
-// developers *understand* and *resolve* the error. If a localized user-facing
-// error message is needed, put the localized message in the error details or
-// localize it in the client. The optional error details may contain arbitrary
-// information about the error. There is a predefined set of error detail types
-// in the package `google.rpc` that can be used for common error conditions.
+// The `Status` message contains three pieces of data: error code, error
+// message, and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes
+// if needed.  The error message should be a developer-facing English message
+// that helps developers *understand* and *resolve* the error. If a localized
+// user-facing error message is needed, put the localized message in the error
+// details or localize it in the client. The optional error details may contain
+// arbitrary information about the error. There is a predefined set of error
+// detail types in the package `google.rpc` that can be used for common error
+// conditions.
 //
 // # Language mapping
 //
@@ -78,12 +78,14 @@
 // - Logging. If some API errors are stored in logs, the message `Status` could
 //     be used directly after any stripping needed for security/privacy reasons.
 message Status {
-  // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+  // The status code, which should be an enum value of
+  // [google.rpc.Code][google.rpc.Code].
   int32 code = 1;
 
   // A developer-facing error message, which should be in English. Any
   // user-facing error message should be localized and sent in the
-  // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+  // [google.rpc.Status.details][google.rpc.Status.details] field, or localized
+  // by the client.
   string message = 2;
 
   // A list of messages that carry the error details.  There is a common set of
diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README
index 405429a..6af26af 100644
--- a/vendor/github.com/gogo/protobuf/README
+++ b/vendor/github.com/gogo/protobuf/README
@@ -29,11 +29,19 @@
 	https://golang.org/doc/install
   for details or, if you are using gccgo, follow the instructions at
 	https://golang.org/doc/install/gccgo
-- Grab the code from the repository and install the proto package.
+- Grab the code from the repository and install the `proto` package.
   The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
-  The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
-  defaulting to $GOPATH/bin.  It must be in your $PATH for the protocol
-  compiler, protoc, to find it.
+  The compiler plugin, `protoc-gen-go`, will be installed in `$GOPATH/bin`
+  unless `$GOBIN` is set. It must be in your `$PATH` for the protocol
+  compiler, `protoc`, to find it.
+- If you need a particular version of `protoc-gen-go` (e.g., to match your
+  `proto` package version), one option is
+  ```shell
+  GIT_TAG="v1.2.0" # change as needed
+  go get -d -u github.com/golang/protobuf/protoc-gen-go
+  git -C "$(go env GOPATH)"/src/github.com/golang/protobuf checkout $GIT_TAG
+  go install github.com/golang/protobuf/protoc-gen-go
+  ```
 
 This software has two parts: a 'protocol compiler plugin' that
 generates Go source files that, once compiled, can access and manage
diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md
index f8a3aee..06685a2 100644
--- a/vendor/github.com/gogo/protobuf/Readme.md
+++ b/vendor/github.com/gogo/protobuf/Readme.md
@@ -63,7 +63,7 @@
   - <a href="https://jbrandhorst.com/post/gogoproto/">So you want to use GoGo Protobuf - Johan Brandhorst</a>
   - <a href="https://jbrandhorst.com/post/grpc-errors/">Advanced gRPC Error Usage - Johan Brandhorst</a>
   - <a href="https://www.udemy.com/grpc-golang/?couponCode=GITHUB10">gRPC Golang Course on Udemy - Stephane Maarek</a>
-  
+
 ## Getting Started
 
 There are several ways to use gogoprotobuf, but for all you need to install go and protoc.
@@ -75,11 +75,11 @@
 
 ### Installation
 
-To install it, you must first have Go (at least version 1.6.3 or 1.9 if you are using gRPC) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). 
-Latest patch versions of 1.9 and 1.10 are continuously tested.
+To install it, you must first have Go (at least version 1.6.3 or 1.9 if you are using gRPC) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)).
+Latest patch versions of 1.10 and 1.11 are continuously tested.
 
 Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
-Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.5.1 are continuously tested.
+Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.6.1 are continuously tested.
 
 ### Speed
 
@@ -156,4 +156,4 @@
 This software is licensed under the 3-Clause BSD License
 ("BSD License 2.0", "Revised BSD License", "New BSD License", or "Modified BSD License").
 
-  
+
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
index 0057f8e..e352808 100644
--- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -1,12 +1,14 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: gogo.proto
 
-package gogoproto // import "github.com/gogo/protobuf/gogoproto"
+package gogoproto
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+	math "math"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -24,7 +26,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         62001,
 	Name:          "gogoproto.goproto_enum_prefix",
-	Tag:           "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix",
+	Tag:           "varint,62001,opt,name=goproto_enum_prefix",
 	Filename:      "gogo.proto",
 }
 
@@ -33,7 +35,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         62021,
 	Name:          "gogoproto.goproto_enum_stringer",
-	Tag:           "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer",
+	Tag:           "varint,62021,opt,name=goproto_enum_stringer",
 	Filename:      "gogo.proto",
 }
 
@@ -42,7 +44,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         62022,
 	Name:          "gogoproto.enum_stringer",
-	Tag:           "varint,62022,opt,name=enum_stringer,json=enumStringer",
+	Tag:           "varint,62022,opt,name=enum_stringer",
 	Filename:      "gogo.proto",
 }
 
@@ -51,7 +53,7 @@
 	ExtensionType: (*string)(nil),
 	Field:         62023,
 	Name:          "gogoproto.enum_customname",
-	Tag:           "bytes,62023,opt,name=enum_customname,json=enumCustomname",
+	Tag:           "bytes,62023,opt,name=enum_customname",
 	Filename:      "gogo.proto",
 }
 
@@ -69,7 +71,7 @@
 	ExtensionType: (*string)(nil),
 	Field:         66001,
 	Name:          "gogoproto.enumvalue_customname",
-	Tag:           "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname",
+	Tag:           "bytes,66001,opt,name=enumvalue_customname",
 	Filename:      "gogo.proto",
 }
 
@@ -78,7 +80,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63001,
 	Name:          "gogoproto.goproto_getters_all",
-	Tag:           "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll",
+	Tag:           "varint,63001,opt,name=goproto_getters_all",
 	Filename:      "gogo.proto",
 }
 
@@ -87,7 +89,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63002,
 	Name:          "gogoproto.goproto_enum_prefix_all",
-	Tag:           "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll",
+	Tag:           "varint,63002,opt,name=goproto_enum_prefix_all",
 	Filename:      "gogo.proto",
 }
 
@@ -96,7 +98,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63003,
 	Name:          "gogoproto.goproto_stringer_all",
-	Tag:           "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll",
+	Tag:           "varint,63003,opt,name=goproto_stringer_all",
 	Filename:      "gogo.proto",
 }
 
@@ -105,7 +107,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63004,
 	Name:          "gogoproto.verbose_equal_all",
-	Tag:           "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll",
+	Tag:           "varint,63004,opt,name=verbose_equal_all",
 	Filename:      "gogo.proto",
 }
 
@@ -114,7 +116,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63005,
 	Name:          "gogoproto.face_all",
-	Tag:           "varint,63005,opt,name=face_all,json=faceAll",
+	Tag:           "varint,63005,opt,name=face_all",
 	Filename:      "gogo.proto",
 }
 
@@ -123,7 +125,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63006,
 	Name:          "gogoproto.gostring_all",
-	Tag:           "varint,63006,opt,name=gostring_all,json=gostringAll",
+	Tag:           "varint,63006,opt,name=gostring_all",
 	Filename:      "gogo.proto",
 }
 
@@ -132,7 +134,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63007,
 	Name:          "gogoproto.populate_all",
-	Tag:           "varint,63007,opt,name=populate_all,json=populateAll",
+	Tag:           "varint,63007,opt,name=populate_all",
 	Filename:      "gogo.proto",
 }
 
@@ -141,7 +143,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63008,
 	Name:          "gogoproto.stringer_all",
-	Tag:           "varint,63008,opt,name=stringer_all,json=stringerAll",
+	Tag:           "varint,63008,opt,name=stringer_all",
 	Filename:      "gogo.proto",
 }
 
@@ -150,7 +152,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63009,
 	Name:          "gogoproto.onlyone_all",
-	Tag:           "varint,63009,opt,name=onlyone_all,json=onlyoneAll",
+	Tag:           "varint,63009,opt,name=onlyone_all",
 	Filename:      "gogo.proto",
 }
 
@@ -159,7 +161,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63013,
 	Name:          "gogoproto.equal_all",
-	Tag:           "varint,63013,opt,name=equal_all,json=equalAll",
+	Tag:           "varint,63013,opt,name=equal_all",
 	Filename:      "gogo.proto",
 }
 
@@ -168,7 +170,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63014,
 	Name:          "gogoproto.description_all",
-	Tag:           "varint,63014,opt,name=description_all,json=descriptionAll",
+	Tag:           "varint,63014,opt,name=description_all",
 	Filename:      "gogo.proto",
 }
 
@@ -177,7 +179,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63015,
 	Name:          "gogoproto.testgen_all",
-	Tag:           "varint,63015,opt,name=testgen_all,json=testgenAll",
+	Tag:           "varint,63015,opt,name=testgen_all",
 	Filename:      "gogo.proto",
 }
 
@@ -186,7 +188,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63016,
 	Name:          "gogoproto.benchgen_all",
-	Tag:           "varint,63016,opt,name=benchgen_all,json=benchgenAll",
+	Tag:           "varint,63016,opt,name=benchgen_all",
 	Filename:      "gogo.proto",
 }
 
@@ -195,7 +197,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63017,
 	Name:          "gogoproto.marshaler_all",
-	Tag:           "varint,63017,opt,name=marshaler_all,json=marshalerAll",
+	Tag:           "varint,63017,opt,name=marshaler_all",
 	Filename:      "gogo.proto",
 }
 
@@ -204,7 +206,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63018,
 	Name:          "gogoproto.unmarshaler_all",
-	Tag:           "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll",
+	Tag:           "varint,63018,opt,name=unmarshaler_all",
 	Filename:      "gogo.proto",
 }
 
@@ -213,7 +215,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63019,
 	Name:          "gogoproto.stable_marshaler_all",
-	Tag:           "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll",
+	Tag:           "varint,63019,opt,name=stable_marshaler_all",
 	Filename:      "gogo.proto",
 }
 
@@ -222,7 +224,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63020,
 	Name:          "gogoproto.sizer_all",
-	Tag:           "varint,63020,opt,name=sizer_all,json=sizerAll",
+	Tag:           "varint,63020,opt,name=sizer_all",
 	Filename:      "gogo.proto",
 }
 
@@ -231,7 +233,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63021,
 	Name:          "gogoproto.goproto_enum_stringer_all",
-	Tag:           "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll",
+	Tag:           "varint,63021,opt,name=goproto_enum_stringer_all",
 	Filename:      "gogo.proto",
 }
 
@@ -240,7 +242,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63022,
 	Name:          "gogoproto.enum_stringer_all",
-	Tag:           "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll",
+	Tag:           "varint,63022,opt,name=enum_stringer_all",
 	Filename:      "gogo.proto",
 }
 
@@ -249,7 +251,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63023,
 	Name:          "gogoproto.unsafe_marshaler_all",
-	Tag:           "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll",
+	Tag:           "varint,63023,opt,name=unsafe_marshaler_all",
 	Filename:      "gogo.proto",
 }
 
@@ -258,7 +260,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63024,
 	Name:          "gogoproto.unsafe_unmarshaler_all",
-	Tag:           "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll",
+	Tag:           "varint,63024,opt,name=unsafe_unmarshaler_all",
 	Filename:      "gogo.proto",
 }
 
@@ -267,7 +269,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63025,
 	Name:          "gogoproto.goproto_extensions_map_all",
-	Tag:           "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll",
+	Tag:           "varint,63025,opt,name=goproto_extensions_map_all",
 	Filename:      "gogo.proto",
 }
 
@@ -276,7 +278,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63026,
 	Name:          "gogoproto.goproto_unrecognized_all",
-	Tag:           "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll",
+	Tag:           "varint,63026,opt,name=goproto_unrecognized_all",
 	Filename:      "gogo.proto",
 }
 
@@ -285,7 +287,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63027,
 	Name:          "gogoproto.gogoproto_import",
-	Tag:           "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport",
+	Tag:           "varint,63027,opt,name=gogoproto_import",
 	Filename:      "gogo.proto",
 }
 
@@ -294,7 +296,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63028,
 	Name:          "gogoproto.protosizer_all",
-	Tag:           "varint,63028,opt,name=protosizer_all,json=protosizerAll",
+	Tag:           "varint,63028,opt,name=protosizer_all",
 	Filename:      "gogo.proto",
 }
 
@@ -303,7 +305,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63029,
 	Name:          "gogoproto.compare_all",
-	Tag:           "varint,63029,opt,name=compare_all,json=compareAll",
+	Tag:           "varint,63029,opt,name=compare_all",
 	Filename:      "gogo.proto",
 }
 
@@ -312,7 +314,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63030,
 	Name:          "gogoproto.typedecl_all",
-	Tag:           "varint,63030,opt,name=typedecl_all,json=typedeclAll",
+	Tag:           "varint,63030,opt,name=typedecl_all",
 	Filename:      "gogo.proto",
 }
 
@@ -321,7 +323,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63031,
 	Name:          "gogoproto.enumdecl_all",
-	Tag:           "varint,63031,opt,name=enumdecl_all,json=enumdeclAll",
+	Tag:           "varint,63031,opt,name=enumdecl_all",
 	Filename:      "gogo.proto",
 }
 
@@ -330,7 +332,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63032,
 	Name:          "gogoproto.goproto_registration",
-	Tag:           "varint,63032,opt,name=goproto_registration,json=goprotoRegistration",
+	Tag:           "varint,63032,opt,name=goproto_registration",
 	Filename:      "gogo.proto",
 }
 
@@ -339,7 +341,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63033,
 	Name:          "gogoproto.messagename_all",
-	Tag:           "varint,63033,opt,name=messagename_all,json=messagenameAll",
+	Tag:           "varint,63033,opt,name=messagename_all",
 	Filename:      "gogo.proto",
 }
 
@@ -348,7 +350,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63034,
 	Name:          "gogoproto.goproto_sizecache_all",
-	Tag:           "varint,63034,opt,name=goproto_sizecache_all,json=goprotoSizecacheAll",
+	Tag:           "varint,63034,opt,name=goproto_sizecache_all",
 	Filename:      "gogo.proto",
 }
 
@@ -357,7 +359,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         63035,
 	Name:          "gogoproto.goproto_unkeyed_all",
-	Tag:           "varint,63035,opt,name=goproto_unkeyed_all,json=goprotoUnkeyedAll",
+	Tag:           "varint,63035,opt,name=goproto_unkeyed_all",
 	Filename:      "gogo.proto",
 }
 
@@ -366,7 +368,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64001,
 	Name:          "gogoproto.goproto_getters",
-	Tag:           "varint,64001,opt,name=goproto_getters,json=goprotoGetters",
+	Tag:           "varint,64001,opt,name=goproto_getters",
 	Filename:      "gogo.proto",
 }
 
@@ -375,7 +377,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64003,
 	Name:          "gogoproto.goproto_stringer",
-	Tag:           "varint,64003,opt,name=goproto_stringer,json=goprotoStringer",
+	Tag:           "varint,64003,opt,name=goproto_stringer",
 	Filename:      "gogo.proto",
 }
 
@@ -384,7 +386,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64004,
 	Name:          "gogoproto.verbose_equal",
-	Tag:           "varint,64004,opt,name=verbose_equal,json=verboseEqual",
+	Tag:           "varint,64004,opt,name=verbose_equal",
 	Filename:      "gogo.proto",
 }
 
@@ -492,7 +494,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64019,
 	Name:          "gogoproto.stable_marshaler",
-	Tag:           "varint,64019,opt,name=stable_marshaler,json=stableMarshaler",
+	Tag:           "varint,64019,opt,name=stable_marshaler",
 	Filename:      "gogo.proto",
 }
 
@@ -510,7 +512,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64023,
 	Name:          "gogoproto.unsafe_marshaler",
-	Tag:           "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler",
+	Tag:           "varint,64023,opt,name=unsafe_marshaler",
 	Filename:      "gogo.proto",
 }
 
@@ -519,7 +521,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64024,
 	Name:          "gogoproto.unsafe_unmarshaler",
-	Tag:           "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler",
+	Tag:           "varint,64024,opt,name=unsafe_unmarshaler",
 	Filename:      "gogo.proto",
 }
 
@@ -528,7 +530,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64025,
 	Name:          "gogoproto.goproto_extensions_map",
-	Tag:           "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap",
+	Tag:           "varint,64025,opt,name=goproto_extensions_map",
 	Filename:      "gogo.proto",
 }
 
@@ -537,7 +539,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64026,
 	Name:          "gogoproto.goproto_unrecognized",
-	Tag:           "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized",
+	Tag:           "varint,64026,opt,name=goproto_unrecognized",
 	Filename:      "gogo.proto",
 }
 
@@ -582,7 +584,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64034,
 	Name:          "gogoproto.goproto_sizecache",
-	Tag:           "varint,64034,opt,name=goproto_sizecache,json=goprotoSizecache",
+	Tag:           "varint,64034,opt,name=goproto_sizecache",
 	Filename:      "gogo.proto",
 }
 
@@ -591,7 +593,7 @@
 	ExtensionType: (*bool)(nil),
 	Field:         64035,
 	Name:          "gogoproto.goproto_unkeyed",
-	Tag:           "varint,64035,opt,name=goproto_unkeyed,json=goprotoUnkeyed",
+	Tag:           "varint,64035,opt,name=goproto_unkeyed",
 	Filename:      "gogo.proto",
 }
 
@@ -782,9 +784,9 @@
 	proto.RegisterExtension(E_Wktpointer)
 }
 
-func init() { proto.RegisterFile("gogo.proto", fileDescriptor_gogo_b95f77e237336c7c) }
+func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) }
 
-var fileDescriptor_gogo_b95f77e237336c7c = []byte{
+var fileDescriptor_592445b5231bc2b9 = []byte{
 	// 1328 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45,
 	0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9,
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
index d9aa3c4..63b0f08 100644
--- a/vendor/github.com/gogo/protobuf/proto/decode.go
+++ b/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -186,7 +186,6 @@
 	if b&0x80 == 0 {
 		goto done
 	}
-	// x -= 0x80 << 63 // Always zero.
 
 	return 0, errOverflow
 
diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
index 44ebd45..686bd2a 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -544,7 +544,7 @@
 	}
 	typ := reflect.TypeOf(extension.ExtensionType)
 	if typ != reflect.TypeOf(value) {
-		return errors.New("proto: bad extension value type")
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
 	}
 	// nil extension values need to be caught early, because the
 	// encoder can't distinguish an ErrNil due to a nil extension
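
The extensions hunk above upgrades the mismatch error from a fixed string to `fmt.Errorf` with two `%T` verbs, so setting an extension with the wrong value type now reports both the type that was supplied and the type the descriptor expects. A minimal illustration of what that formatting yields (plain `fmt`, not the library code):

```go
// Show the got/want error text produced by the %T verbs in the new message.
package main

import "fmt"

func main() {
	var want *string // the type an extension descriptor might expect
	got := 42        // the value a caller actually passed
	err := fmt.Errorf("proto: bad extension value type. got: %T, want: %T", got, want)
	fmt.Println(err) // proto: bad extension value type. got: int, want: *string
}
```
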
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
index b2271d0..d17f802 100644
--- a/vendor/github.com/gogo/protobuf/proto/lib.go
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -341,26 +341,6 @@
 	ProtoMessage()
 }
 
-// Stats records allocation details about the protocol buffer encoders
-// and decoders.  Useful for tuning the library itself.
-type Stats struct {
-	Emalloc uint64 // mallocs in encode
-	Dmalloc uint64 // mallocs in decode
-	Encode  uint64 // number of encodes
-	Decode  uint64 // number of decodes
-	Chit    uint64 // number of cache hits
-	Cmiss   uint64 // number of cache misses
-	Size    uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
 // A Buffer is a buffer manager for marshaling and unmarshaling
 // protocol buffers.  It may be reused between invocations to
 // reduce memory usage.  It is not necessary to use a Buffer;
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
index 3b6ca41..f48a756 100644
--- a/vendor/github.com/gogo/protobuf/proto/message_set.go
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -36,13 +36,7 @@
  */
 
 import (
-	"bytes"
-	"encoding/json"
 	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"sync"
 )
 
 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -145,46 +139,9 @@
 	return buf[i+1:]
 }
 
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
-	return marshalMessageSet(exts, false)
-}
-
-// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		var u marshalInfo
-		siz := u.sizeMessageSet(exts)
-		b := make([]byte, 0, siz)
-		return u.appendMessageSet(b, exts, deterministic)
-
-	case map[int32]Extension:
-		// This is an old-style extension map.
-		// Wrap it in a new-style XXX_InternalExtensions.
-		ie := XXX_InternalExtensions{
-			p: &struct {
-				mu           sync.Mutex
-				extensionMap map[int32]Extension
-			}{
-				extensionMap: exts,
-			},
-		}
-
-		var u marshalInfo
-		siz := u.sizeMessageSet(&ie)
-		b := make([]byte, 0, siz)
-		return u.appendMessageSet(b, &ie, deterministic)
-
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
 // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
 	var m map[int32]Extension
 	switch exts := exts.(type) {
 	case *XXX_InternalExtensions:
@@ -222,93 +179,3 @@
 	}
 	return nil
 }
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		var mu sync.Locker
-		m, mu = exts.extensionsRead()
-		if m != nil {
-			// Keep the extensions map locked until we're done marshaling to prevent
-			// races between marshaling and unmarshaling the lazily-{en,de}coded
-			// values.
-			mu.Lock()
-			defer mu.Unlock()
-		}
-	case map[int32]Extension:
-		m = exts
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-	var b bytes.Buffer
-	b.WriteByte('{')
-
-	// Process the map in key order for deterministic output.
-	ids := make([]int32, 0, len(m))
-	for id := range m {
-		ids = append(ids, id)
-	}
-	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
-	for i, id := range ids {
-		ext := m[id]
-		msd, ok := messageSetMap[id]
-		if !ok {
-			// Unknown type; we can't render it, so skip it.
-			continue
-		}
-
-		if i > 0 && b.Len() > 1 {
-			b.WriteByte(',')
-		}
-
-		fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
-		x := ext.value
-		if x == nil {
-			x = reflect.New(msd.t.Elem()).Interface()
-			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
-				return nil, err
-			}
-		}
-		d, err := json.Marshal(x)
-		if err != nil {
-			return nil, err
-		}
-		b.Write(d)
-	}
-	b.WriteByte('}')
-	return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
-	// Common-case fast path.
-	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
-		return nil
-	}
-
-	// This is fairly tricky, and it's not clear that it is needed.
-	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
-	t    reflect.Type // pointer to struct
-	name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
-	messageSetMap[fieldNum] = messageSetDesc{
-		t:    reflect.TypeOf(m),
-		name: name,
-	}
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
index 04dcb8d..c9e5fa0 100644
--- a/vendor/github.com/gogo/protobuf/proto/properties.go
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -391,9 +391,6 @@
 	sprop, ok := propertiesMap[t]
 	propertiesMu.RUnlock()
 	if ok {
-		if collectStats {
-			stats.Chit++
-		}
 		return sprop
 	}
 
@@ -406,14 +403,8 @@
 // getPropertiesLocked requires that propertiesMu is held.
 func getPropertiesLocked(t reflect.Type) *StructProperties {
 	if prop, ok := propertiesMap[t]; ok {
-		if collectStats {
-			stats.Chit++
-		}
 		return prop
 	}
-	if collectStats {
-		stats.Cmiss++
-	}
 
 	prop := new(StructProperties)
 	// in case of recursive protos, fill this in now.
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
index ba58c49..9b1538d 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
@@ -491,7 +491,7 @@
 
 func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
 	fi.field = toField(f)
-	fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+	fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
 	fi.isPointer = true
 	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
 	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
index e6b15c7..bb2622f 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
@@ -138,7 +138,7 @@
 		u.computeUnmarshalInfo()
 	}
 	if u.isMessageSet {
-		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
 	}
 	var reqMask uint64 // bitmask of required fields we've seen.
 	var errLater error
@@ -2142,7 +2142,7 @@
 // If there is an error, it returns 0,0.
 func decodeVarint(b []byte) (uint64, int) {
 	var x, y uint64
-	if len(b) <= 0 {
+	if len(b) == 0 {
 		goto bad
 	}
 	x = uint64(b[0])
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto
index b6cc7cb..4cf3843 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto
@@ -121,7 +121,8 @@
 //
 message Any {
   // A URL/resource name that uniquely identifies the type of the serialized
-  // protocol buffer message. The last segment of the URL's path must represent
+  // protocol buffer message. This string must contain at least
+  // one "/" character. The last segment of the URL's path must represent
   // the fully qualified name of the type (as in
   // `path/google.protobuf.Duration`). The name should be in a canonical form
   // (e.g., leading "." is not accepted).
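
The added sentence makes the "/" requirement for type_url explicit. A small sketch of a conforming URL using the gogo-generated Any type (serializedDuration is a hypothetical wire-encoded payload):

any := &types.Any{
	// Contains a "/", and the last path segment is the fully qualified type name.
	TypeUrl: "type.googleapis.com/google.protobuf.Duration",
	Value:   serializedDuration, // hypothetical serialized Duration bytes
}
_ = any
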
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
index 1598ad7..887f16d 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto
@@ -417,6 +417,17 @@
   // determining the namespace.
   optional string php_namespace = 41;
 
+
+  // Use this option to change the namespace of php generated metadata classes.
+  // Default is empty. When this option is empty, the proto file name will be used
+  // for determining the namespace.
+  optional string php_metadata_namespace = 44;
+
+  // Use this option to change the package of ruby generated classes. Default
+  // is empty. When this option is not set, the package name will be used for
+  // determining the ruby package.
+  optional string ruby_package = 45;
+
   // The parser stores options it doesn't recognize here.
   // See the documentation for the "Options" section above.
   repeated UninterpretedOption uninterpreted_option = 999;
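
The two new file options surface as generated Go accessors further down in this diff (GetPhpMetadataNamespace and GetRubyPackage). A minimal sketch of reading them, where fd is a hypothetical *descriptor.FileDescriptorProto:

opts := fd.GetOptions()                     // nil-safe accessor on the generated type
fmt.Println(opts.GetPhpMetadataNamespace()) // empty unless field 44 is set
fmt.Println(opts.GetRubyPackage())          // empty unless field 45 is set
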
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto
index 1216198..7b77007 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto
@@ -38,6 +38,7 @@
 option java_multiple_files = true;
 option objc_class_prefix = "GPB";
 option go_package = "types";
+option cc_enable_arenas = true;
 
 // `FieldMask` represents a set of symbolic field paths, for example:
 //
@@ -107,57 +108,49 @@
 // describe the updated values, the API ignores the values of all
 // fields not covered by the mask.
 //
-// If a repeated field is specified for an update operation, the existing
-// repeated values in the target resource will be overwritten by the new values.
-// Note that a repeated field is only allowed in the last position of a `paths`
-// string.
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
 //
 // If a sub-message is specified in the last position of the field mask for an
-// update operation, then the existing sub-message in the target resource is
-// overwritten. Given the target message:
+// update operation, then new value will be merged into the existing sub-message
+// in the target resource.
+//
+// For example, given the target message:
 //
 //     f {
 //       b {
-//         d : 1
-//         x : 2
+//         d: 1
+//         x: 2
 //       }
-//       c : 1
+//       c: [1]
 //     }
 //
 // And an update message:
 //
 //     f {
 //       b {
-//         d : 10
+//         d: 10
 //       }
+//       c: [2]
 //     }
 //
 // then if the field mask is:
 //
-//  paths: "f.b"
+//  paths: ["f.b", "f.c"]
 //
 // then the result will be:
 //
 //     f {
 //       b {
-//         d : 10
+//         d: 10
+//         x: 2
 //       }
-//       c : 1
+//       c: [1, 2]
 //     }
 //
-// However, if the update mask was:
-//
-//  paths: "f.b.d"
-//
-// then the result would be:
-//
-//     f {
-//       b {
-//         d : 10
-//         x : 2
-//       }
-//       c : 1
-//     }
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
 //
 // In order to reset a field's value to the default, the field must
 // be in the mask and set to the default value in the provided resource.
@@ -243,8 +236,8 @@
 //
 // ## Field Mask Verification
 //
-// The implementation of the all the API methods, which have any FieldMask type
-// field in the request, should verify the included field paths, and return
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
 // `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
 message FieldMask {
   // The set of field mask paths.
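
Relating the revised merge semantics above to code: with the mask from the comment's example, the sub-message f.b is merged field by field and new elements are appended to the repeated field f.c. A minimal sketch using the gogo-generated FieldMask type (import path assumed to be github.com/gogo/protobuf/types):

mask := &types.FieldMask{Paths: []string{"f.b", "f.c"}}
_ = mask // sent alongside the update message to an API that honours field masks
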
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
index 150468b..18cb7c3 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto
@@ -40,17 +40,19 @@
 option java_multiple_files = true;
 option objc_class_prefix = "GPB";
 
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from  RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+// A Timestamp represents a point in time independent of any time zone or local
+// calendar, encoded as a count of seconds and fractions of seconds at
+// nanosecond resolution. The count is relative to an epoch at UTC midnight on
+// January 1, 1970, in the proleptic Gregorian calendar which extends the
+// Gregorian calendar backwards to year one.
+//
+// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+// second table is needed for interpretation, using a [24-hour linear
+// smear](https://developers.google.com/time/smear).
+//
+// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+// restricting to that range, we ensure that we can convert to and from [RFC
+// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
 //
 // # Examples
 //
@@ -111,12 +113,12 @@
 // 01:30 UTC on January 15, 2017.
 //
 // In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
 // method. In Python, a standard `datetime.datetime` object can be converted
 // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
 // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
 // can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
 // ) to obtain a formatter capable of generating timestamps in this format.
 //
 //
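
The comment lists JavaScript, Python and Java conversions; for reference, Go's standard library produces the same RFC 3339 form (a sketch, not part of the vendored comment):

s := time.Now().UTC().Format(time.RFC3339Nano) // e.g. "2017-01-15T01:30:15.01Z"
fmt.Println(s)
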
diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto
index c5632e5..59b76ac 100644
--- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto
+++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto
@@ -32,6 +32,11 @@
 // for embedding primitives in the `google.protobuf.Any` type and for places
 // where we need to distinguish between the absence of a primitive
 // typed field and its default value.
+//
+// These wrappers have no meaningful use within repeated fields as they lack
+// the ability to detect presence on individual elements.
+// These wrappers have no meaningful use within a map or a oneof since
+// individual entries of a map or fields of a oneof can already detect presence.
 
 syntax = "proto3";
 
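The new paragraph above is about presence: a wrapper-typed field can distinguish "unset" from "explicitly set to the zero value", which a plain proto3 scalar cannot. A minimal sketch with the gogo-generated wrapper types (import path assumed to be github.com/gogo/protobuf/types):

var unset *types.Int32Value            // field absent
zero := &types.Int32Value{Value: 0}    // field present, explicitly zero
fmt.Println(unset == nil, zero == nil) // true false
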
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
index 44f893b..cacfa39 100644
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
@@ -3,9 +3,11 @@
 
 package descriptor
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	math "math"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -72,6 +74,7 @@
 	17: "TYPE_SINT32",
 	18: "TYPE_SINT64",
 }
+
 var FieldDescriptorProto_Type_value = map[string]int32{
 	"TYPE_DOUBLE":   1,
 	"TYPE_FLOAT":    2,
@@ -98,9 +101,11 @@
 	*p = x
 	return p
 }
+
 func (x FieldDescriptorProto_Type) String() string {
 	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
 }
+
 func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
 	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
 	if err != nil {
@@ -109,8 +114,9 @@
 	*x = FieldDescriptorProto_Type(value)
 	return nil
 }
+
 func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 0}
+	return fileDescriptor_308767df5ffe18af, []int{4, 0}
 }
 
 type FieldDescriptorProto_Label int32
@@ -127,6 +133,7 @@
 	2: "LABEL_REQUIRED",
 	3: "LABEL_REPEATED",
 }
+
 var FieldDescriptorProto_Label_value = map[string]int32{
 	"LABEL_OPTIONAL": 1,
 	"LABEL_REQUIRED": 2,
@@ -138,9 +145,11 @@
 	*p = x
 	return p
 }
+
 func (x FieldDescriptorProto_Label) String() string {
 	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
 }
+
 func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
 	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
 	if err != nil {
@@ -149,8 +158,9 @@
 	*x = FieldDescriptorProto_Label(value)
 	return nil
 }
+
 func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 1}
+	return fileDescriptor_308767df5ffe18af, []int{4, 1}
 }
 
 // Generated classes can be optimized for speed or code size.
@@ -168,6 +178,7 @@
 	2: "CODE_SIZE",
 	3: "LITE_RUNTIME",
 }
+
 var FileOptions_OptimizeMode_value = map[string]int32{
 	"SPEED":        1,
 	"CODE_SIZE":    2,
@@ -179,9 +190,11 @@
 	*p = x
 	return p
 }
+
 func (x FileOptions_OptimizeMode) String() string {
 	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
 }
+
 func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
 	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
 	if err != nil {
@@ -190,8 +203,9 @@
 	*x = FileOptions_OptimizeMode(value)
 	return nil
 }
+
 func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10, 0}
+	return fileDescriptor_308767df5ffe18af, []int{10, 0}
 }
 
 type FieldOptions_CType int32
@@ -208,6 +222,7 @@
 	1: "CORD",
 	2: "STRING_PIECE",
 }
+
 var FieldOptions_CType_value = map[string]int32{
 	"STRING":       0,
 	"CORD":         1,
@@ -219,9 +234,11 @@
 	*p = x
 	return p
 }
+
 func (x FieldOptions_CType) String() string {
 	return proto.EnumName(FieldOptions_CType_name, int32(x))
 }
+
 func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
 	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
 	if err != nil {
@@ -230,8 +247,9 @@
 	*x = FieldOptions_CType(value)
 	return nil
 }
+
 func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 0}
+	return fileDescriptor_308767df5ffe18af, []int{12, 0}
 }
 
 type FieldOptions_JSType int32
@@ -250,6 +268,7 @@
 	1: "JS_STRING",
 	2: "JS_NUMBER",
 }
+
 var FieldOptions_JSType_value = map[string]int32{
 	"JS_NORMAL": 0,
 	"JS_STRING": 1,
@@ -261,9 +280,11 @@
 	*p = x
 	return p
 }
+
 func (x FieldOptions_JSType) String() string {
 	return proto.EnumName(FieldOptions_JSType_name, int32(x))
 }
+
 func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
 	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
 	if err != nil {
@@ -272,8 +293,9 @@
 	*x = FieldOptions_JSType(value)
 	return nil
 }
+
 func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 1}
+	return fileDescriptor_308767df5ffe18af, []int{12, 1}
 }
 
 // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
@@ -292,6 +314,7 @@
 	1: "NO_SIDE_EFFECTS",
 	2: "IDEMPOTENT",
 }
+
 var MethodOptions_IdempotencyLevel_value = map[string]int32{
 	"IDEMPOTENCY_UNKNOWN": 0,
 	"NO_SIDE_EFFECTS":     1,
@@ -303,9 +326,11 @@
 	*p = x
 	return p
 }
+
 func (x MethodOptions_IdempotencyLevel) String() string {
 	return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
 }
+
 func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
 	value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
 	if err != nil {
@@ -314,8 +339,9 @@
 	*x = MethodOptions_IdempotencyLevel(value)
 	return nil
 }
+
 func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17, 0}
+	return fileDescriptor_308767df5ffe18af, []int{17, 0}
 }
 
 // The protocol compiler can output a FileDescriptorSet containing the .proto
@@ -331,7 +357,7 @@
 func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
 func (*FileDescriptorSet) ProtoMessage()    {}
 func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{0}
+	return fileDescriptor_308767df5ffe18af, []int{0}
 }
 func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
@@ -339,8 +365,8 @@
 func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
 }
-func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileDescriptorSet.Merge(dst, src)
+func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorSet.Merge(m, src)
 }
 func (m *FileDescriptorSet) XXX_Size() int {
 	return xxx_messageInfo_FileDescriptorSet.Size(m)
@@ -392,7 +418,7 @@
 func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
 func (*FileDescriptorProto) ProtoMessage()    {}
 func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{1}
+	return fileDescriptor_308767df5ffe18af, []int{1}
 }
 func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
@@ -400,8 +426,8 @@
 func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
 }
-func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileDescriptorProto.Merge(dst, src)
+func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorProto.Merge(m, src)
 }
 func (m *FileDescriptorProto) XXX_Size() int {
 	return xxx_messageInfo_FileDescriptorProto.Size(m)
@@ -519,7 +545,7 @@
 func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
 func (*DescriptorProto) ProtoMessage()    {}
 func (*DescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2}
+	return fileDescriptor_308767df5ffe18af, []int{2}
 }
 func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
@@ -527,8 +553,8 @@
 func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
 }
-func (dst *DescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto.Merge(dst, src)
+func (m *DescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto.Merge(m, src)
 }
 func (m *DescriptorProto) XXX_Size() int {
 	return xxx_messageInfo_DescriptorProto.Size(m)
@@ -622,7 +648,7 @@
 func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
 func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
 func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 0}
+	return fileDescriptor_308767df5ffe18af, []int{2, 0}
 }
 func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
@@ -630,8 +656,8 @@
 func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
 }
-func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src)
+func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
 }
 func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
 	return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
@@ -678,7 +704,7 @@
 func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
 func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
 func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 1}
+	return fileDescriptor_308767df5ffe18af, []int{2, 1}
 }
 func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
@@ -686,8 +712,8 @@
 func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
 }
-func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src)
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
 }
 func (m *DescriptorProto_ReservedRange) XXX_Size() int {
 	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
@@ -725,7 +751,7 @@
 func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
 func (*ExtensionRangeOptions) ProtoMessage()    {}
 func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{3}
+	return fileDescriptor_308767df5ffe18af, []int{3}
 }
 
 var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
@@ -735,14 +761,15 @@
 func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_ExtensionRangeOptions
 }
+
 func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
 }
 func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
 }
-func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src)
+func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
 }
 func (m *ExtensionRangeOptions) XXX_Size() int {
 	return xxx_messageInfo_ExtensionRangeOptions.Size(m)
@@ -801,7 +828,7 @@
 func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
 func (*FieldDescriptorProto) ProtoMessage()    {}
 func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4}
+	return fileDescriptor_308767df5ffe18af, []int{4}
 }
 func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
@@ -809,8 +836,8 @@
 func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
 }
-func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FieldDescriptorProto.Merge(dst, src)
+func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
 }
 func (m *FieldDescriptorProto) XXX_Size() int {
 	return xxx_messageInfo_FieldDescriptorProto.Size(m)
@@ -904,7 +931,7 @@
 func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
 func (*OneofDescriptorProto) ProtoMessage()    {}
 func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{5}
+	return fileDescriptor_308767df5ffe18af, []int{5}
 }
 func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
@@ -912,8 +939,8 @@
 func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
 }
-func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_OneofDescriptorProto.Merge(dst, src)
+func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
 }
 func (m *OneofDescriptorProto) XXX_Size() int {
 	return xxx_messageInfo_OneofDescriptorProto.Size(m)
@@ -959,7 +986,7 @@
 func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
 func (*EnumDescriptorProto) ProtoMessage()    {}
 func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6}
+	return fileDescriptor_308767df5ffe18af, []int{6}
 }
 func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
@@ -967,8 +994,8 @@
 func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
 }
-func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumDescriptorProto.Merge(dst, src)
+func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
 }
 func (m *EnumDescriptorProto) XXX_Size() int {
 	return xxx_messageInfo_EnumDescriptorProto.Size(m)
@@ -1032,7 +1059,7 @@
 func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
 func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
 func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6, 0}
+	return fileDescriptor_308767df5ffe18af, []int{6, 0}
 }
 func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
@@ -1040,8 +1067,8 @@
 func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
 }
-func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src)
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
 }
 func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
 	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
@@ -1080,7 +1107,7 @@
 func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
 func (*EnumValueDescriptorProto) ProtoMessage()    {}
 func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{7}
+	return fileDescriptor_308767df5ffe18af, []int{7}
 }
 func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
@@ -1088,8 +1115,8 @@
 func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
 }
-func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src)
+func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
 }
 func (m *EnumValueDescriptorProto) XXX_Size() int {
 	return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
@@ -1135,7 +1162,7 @@
 func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
 func (*ServiceDescriptorProto) ProtoMessage()    {}
 func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{8}
+	return fileDescriptor_308767df5ffe18af, []int{8}
 }
 func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
@@ -1143,8 +1170,8 @@
 func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
 }
-func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src)
+func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
 }
 func (m *ServiceDescriptorProto) XXX_Size() int {
 	return xxx_messageInfo_ServiceDescriptorProto.Size(m)
@@ -1197,7 +1224,7 @@
 func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
 func (*MethodDescriptorProto) ProtoMessage()    {}
 func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{9}
+	return fileDescriptor_308767df5ffe18af, []int{9}
 }
 func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
@@ -1205,8 +1232,8 @@
 func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
 }
-func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MethodDescriptorProto.Merge(dst, src)
+func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
 }
 func (m *MethodDescriptorProto) XXX_Size() int {
 	return xxx_messageInfo_MethodDescriptorProto.Size(m)
@@ -1336,6 +1363,14 @@
 	// is empty. When this option is empty, the package name will be used for
 	// determining the namespace.
 	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+	// Use this option to change the namespace of php generated metadata classes.
+	// Default is empty. When this option is empty, the proto file name will be used
+	// for determining the namespace.
+	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+	// Use this option to change the package of ruby generated classes. Default
+	// is empty. When this option is not set, the package name will be used for
+	// determining the ruby package.
+	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
 	// The parser stores options it doesn't recognize here.
 	// See the documentation for the "Options" section above.
 	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -1349,7 +1384,7 @@
 func (m *FileOptions) String() string { return proto.CompactTextString(m) }
 func (*FileOptions) ProtoMessage()    {}
 func (*FileOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10}
+	return fileDescriptor_308767df5ffe18af, []int{10}
 }
 
 var extRange_FileOptions = []proto.ExtensionRange{
@@ -1359,14 +1394,15 @@
 func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_FileOptions
 }
+
 func (m *FileOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FileOptions.Unmarshal(m, b)
 }
 func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
 }
-func (dst *FileOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileOptions.Merge(dst, src)
+func (m *FileOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileOptions.Merge(m, src)
 }
 func (m *FileOptions) XXX_Size() int {
 	return xxx_messageInfo_FileOptions.Size(m)
@@ -1514,6 +1550,20 @@
 	return ""
 }
 
+func (m *FileOptions) GetPhpMetadataNamespace() string {
+	if m != nil && m.PhpMetadataNamespace != nil {
+		return *m.PhpMetadataNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetRubyPackage() string {
+	if m != nil && m.RubyPackage != nil {
+		return *m.RubyPackage
+	}
+	return ""
+}
+
 func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if m != nil {
 		return m.UninterpretedOption
@@ -1584,7 +1634,7 @@
 func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
 func (*MessageOptions) ProtoMessage()    {}
 func (*MessageOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{11}
+	return fileDescriptor_308767df5ffe18af, []int{11}
 }
 
 var extRange_MessageOptions = []proto.ExtensionRange{
@@ -1594,14 +1644,15 @@
 func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_MessageOptions
 }
+
 func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
 }
 func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
 }
-func (dst *MessageOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MessageOptions.Merge(dst, src)
+func (m *MessageOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MessageOptions.Merge(m, src)
 }
 func (m *MessageOptions) XXX_Size() int {
 	return xxx_messageInfo_MessageOptions.Size(m)
@@ -1723,7 +1774,7 @@
 func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
 func (*FieldOptions) ProtoMessage()    {}
 func (*FieldOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12}
+	return fileDescriptor_308767df5ffe18af, []int{12}
 }
 
 var extRange_FieldOptions = []proto.ExtensionRange{
@@ -1733,14 +1784,15 @@
 func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_FieldOptions
 }
+
 func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
 }
 func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
 }
-func (dst *FieldOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FieldOptions.Merge(dst, src)
+func (m *FieldOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldOptions.Merge(m, src)
 }
 func (m *FieldOptions) XXX_Size() int {
 	return xxx_messageInfo_FieldOptions.Size(m)
@@ -1819,7 +1871,7 @@
 func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
 func (*OneofOptions) ProtoMessage()    {}
 func (*OneofOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{13}
+	return fileDescriptor_308767df5ffe18af, []int{13}
 }
 
 var extRange_OneofOptions = []proto.ExtensionRange{
@@ -1829,14 +1881,15 @@
 func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_OneofOptions
 }
+
 func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
 }
 func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
 }
-func (dst *OneofOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_OneofOptions.Merge(dst, src)
+func (m *OneofOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofOptions.Merge(m, src)
 }
 func (m *OneofOptions) XXX_Size() int {
 	return xxx_messageInfo_OneofOptions.Size(m)
@@ -1875,7 +1928,7 @@
 func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
 func (*EnumOptions) ProtoMessage()    {}
 func (*EnumOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{14}
+	return fileDescriptor_308767df5ffe18af, []int{14}
 }
 
 var extRange_EnumOptions = []proto.ExtensionRange{
@@ -1885,14 +1938,15 @@
 func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_EnumOptions
 }
+
 func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
 }
 func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
 }
-func (dst *EnumOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumOptions.Merge(dst, src)
+func (m *EnumOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumOptions.Merge(m, src)
 }
 func (m *EnumOptions) XXX_Size() int {
 	return xxx_messageInfo_EnumOptions.Size(m)
@@ -1944,7 +1998,7 @@
 func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
 func (*EnumValueOptions) ProtoMessage()    {}
 func (*EnumValueOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{15}
+	return fileDescriptor_308767df5ffe18af, []int{15}
 }
 
 var extRange_EnumValueOptions = []proto.ExtensionRange{
@@ -1954,14 +2008,15 @@
 func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_EnumValueOptions
 }
+
 func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
 }
 func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
 }
-func (dst *EnumValueOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumValueOptions.Merge(dst, src)
+func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueOptions.Merge(m, src)
 }
 func (m *EnumValueOptions) XXX_Size() int {
 	return xxx_messageInfo_EnumValueOptions.Size(m)
@@ -2006,7 +2061,7 @@
 func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
 func (*ServiceOptions) ProtoMessage()    {}
 func (*ServiceOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{16}
+	return fileDescriptor_308767df5ffe18af, []int{16}
 }
 
 var extRange_ServiceOptions = []proto.ExtensionRange{
@@ -2016,14 +2071,15 @@
 func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_ServiceOptions
 }
+
 func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
 }
 func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
 }
-func (dst *ServiceOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceOptions.Merge(dst, src)
+func (m *ServiceOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceOptions.Merge(m, src)
 }
 func (m *ServiceOptions) XXX_Size() int {
 	return xxx_messageInfo_ServiceOptions.Size(m)
@@ -2069,7 +2125,7 @@
 func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
 func (*MethodOptions) ProtoMessage()    {}
 func (*MethodOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17}
+	return fileDescriptor_308767df5ffe18af, []int{17}
 }
 
 var extRange_MethodOptions = []proto.ExtensionRange{
@@ -2079,14 +2135,15 @@
 func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_MethodOptions
 }
+
 func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
 }
 func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
 }
-func (dst *MethodOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MethodOptions.Merge(dst, src)
+func (m *MethodOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodOptions.Merge(m, src)
 }
 func (m *MethodOptions) XXX_Size() int {
 	return xxx_messageInfo_MethodOptions.Size(m)
@@ -2146,7 +2203,7 @@
 func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
 func (*UninterpretedOption) ProtoMessage()    {}
 func (*UninterpretedOption) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18}
+	return fileDescriptor_308767df5ffe18af, []int{18}
 }
 func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
@@ -2154,8 +2211,8 @@
 func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
 }
-func (dst *UninterpretedOption) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UninterpretedOption.Merge(dst, src)
+func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption.Merge(m, src)
 }
 func (m *UninterpretedOption) XXX_Size() int {
 	return xxx_messageInfo_UninterpretedOption.Size(m)
@@ -2232,7 +2289,7 @@
 func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
 func (*UninterpretedOption_NamePart) ProtoMessage()    {}
 func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18, 0}
+	return fileDescriptor_308767df5ffe18af, []int{18, 0}
 }
 func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
@@ -2240,8 +2297,8 @@
 func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
 }
-func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src)
+func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
 }
 func (m *UninterpretedOption_NamePart) XXX_Size() int {
 	return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
@@ -2322,7 +2379,7 @@
 func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
 func (*SourceCodeInfo) ProtoMessage()    {}
 func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19}
+	return fileDescriptor_308767df5ffe18af, []int{19}
 }
 func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
@@ -2330,8 +2387,8 @@
 func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
 }
-func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SourceCodeInfo.Merge(dst, src)
+func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
 }
 func (m *SourceCodeInfo) XXX_Size() int {
 	return xxx_messageInfo_SourceCodeInfo.Size(m)
@@ -2439,7 +2496,7 @@
 func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
 func (*SourceCodeInfo_Location) ProtoMessage()    {}
 func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19, 0}
+	return fileDescriptor_308767df5ffe18af, []int{19, 0}
 }
 func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
@@ -2447,8 +2504,8 @@
 func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
 }
-func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src)
+func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
 }
 func (m *SourceCodeInfo_Location) XXX_Size() int {
 	return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
@@ -2510,7 +2567,7 @@
 func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
 func (*GeneratedCodeInfo) ProtoMessage()    {}
 func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20}
+	return fileDescriptor_308767df5ffe18af, []int{20}
 }
 func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
@@ -2518,8 +2575,8 @@
 func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
 }
-func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src)
+func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
 }
 func (m *GeneratedCodeInfo) XXX_Size() int {
 	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
@@ -2559,7 +2616,7 @@
 func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
 func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
 func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20, 0}
+	return fileDescriptor_308767df5ffe18af, []int{20, 0}
 }
 func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
@@ -2567,8 +2624,8 @@
 func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
 }
-func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src)
+func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
 }
 func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
 	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
@@ -2608,6 +2665,12 @@
 }
 
 func init() {
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
 	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
 	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
 	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
@@ -2635,172 +2698,168 @@
 	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
 	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
 	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
-	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
-	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
-	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
-	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
-	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
-	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
 }
 
-func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_descriptor_9588782fb9cbecd6) }
+func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) }
 
-var fileDescriptor_descriptor_9588782fb9cbecd6 = []byte{
-	// 2487 bytes of a gzipped FileDescriptorProto
+var fileDescriptor_308767df5ffe18af = []byte{
+	// 2522 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8,
-	0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xde, 0x8f, 0x38, 0xda, 0x8f,
-	0x38, 0x49, 0xab, 0x2c, 0x9c, 0xc4, 0xc9, 0x3a, 0xc5, 0xb6, 0xb2, 0xc4, 0x78, 0x95, 0xca, 0x92,
+	0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66,
+	0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe,
 	0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89,
-	0x83, 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0x7a, 0x59, 0xa0, 0xd7, 0x02, 0x05,
-	0xda, 0x7b, 0xaf, 0x05, 0x7a, 0xef, 0xa1, 0x40, 0x0b, 0xb4, 0x7f, 0x42, 0x8f, 0xc5, 0xcc, 0x90,
-	0x14, 0xf5, 0x95, 0x78, 0x17, 0x48, 0xf6, 0x64, 0xcf, 0xef, 0xfd, 0xde, 0xe3, 0x9b, 0x37, 0x6f,
-	0xde, 0xbc, 0x19, 0x01, 0xd2, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0xc5, 0x71, 0x6d, 0xdf,
-	0xc6, 0x6b, 0x03, 0xdb, 0x1e, 0x98, 0x54, 0x8c, 0x4e, 0xc6, 0xfd, 0xf2, 0x11, 0xac, 0xdf, 0x33,
-	0x4c, 0x5a, 0x8f, 0x88, 0x5d, 0xea, 0xe3, 0x3b, 0x90, 0xee, 0x1b, 0x26, 0x95, 0x12, 0xdb, 0xa9,
-	0x9d, 0xc2, 0xee, 0x87, 0x95, 0x19, 0xa5, 0xca, 0xb4, 0x46, 0x87, 0xc1, 0x0a, 0xd7, 0x28, 0xff,
-	0x3b, 0x0d, 0x1b, 0x0b, 0xa4, 0x18, 0x43, 0xda, 0x22, 0x23, 0x66, 0x31, 0xb1, 0x93, 0x57, 0xf8,
-	0xff, 0x58, 0x82, 0x15, 0x87, 0x68, 0x8f, 0xc9, 0x80, 0x4a, 0x49, 0x0e, 0x87, 0x43, 0xfc, 0x3e,
-	0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0x4e, 0xa5, 0xd4, 0x76, 0x6a, 0x27, 0xaf, 0xc4, 0x10,
-	0x7c, 0x0d, 0xd6, 0x9d, 0xf1, 0x89, 0x69, 0x68, 0x6a, 0x8c, 0x06, 0xdb, 0xa9, 0x9d, 0x8c, 0x82,
-	0x84, 0xa0, 0x3e, 0x21, 0x5f, 0x86, 0xb5, 0xa7, 0x94, 0x3c, 0x8e, 0x53, 0x0b, 0x9c, 0x5a, 0x62,
-	0x70, 0x8c, 0x58, 0x83, 0xe2, 0x88, 0x7a, 0x1e, 0x19, 0x50, 0xd5, 0x3f, 0x75, 0xa8, 0x94, 0xe6,
-	0xb3, 0xdf, 0x9e, 0x9b, 0xfd, 0xec, 0xcc, 0x0b, 0x81, 0x56, 0xef, 0xd4, 0xa1, 0xb8, 0x0a, 0x79,
-	0x6a, 0x8d, 0x47, 0xc2, 0x42, 0x66, 0x49, 0xfc, 0x64, 0x6b, 0x3c, 0x9a, 0xb5, 0x92, 0x63, 0x6a,
-	0x81, 0x89, 0x15, 0x8f, 0xba, 0x4f, 0x0c, 0x8d, 0x4a, 0x59, 0x6e, 0xe0, 0xf2, 0x9c, 0x81, 0xae,
-	0x90, 0xcf, 0xda, 0x08, 0xf5, 0x70, 0x0d, 0xf2, 0xf4, 0x99, 0x4f, 0x2d, 0xcf, 0xb0, 0x2d, 0x69,
-	0x85, 0x1b, 0xf9, 0x68, 0xc1, 0x2a, 0x52, 0x53, 0x9f, 0x35, 0x31, 0xd1, 0xc3, 0x7b, 0xb0, 0x62,
-	0x3b, 0xbe, 0x61, 0x5b, 0x9e, 0x94, 0xdb, 0x4e, 0xec, 0x14, 0x76, 0xdf, 0x5d, 0x98, 0x08, 0x6d,
-	0xc1, 0x51, 0x42, 0x32, 0x6e, 0x00, 0xf2, 0xec, 0xb1, 0xab, 0x51, 0x55, 0xb3, 0x75, 0xaa, 0x1a,
-	0x56, 0xdf, 0x96, 0xf2, 0xdc, 0xc0, 0xc5, 0xf9, 0x89, 0x70, 0x62, 0xcd, 0xd6, 0x69, 0xc3, 0xea,
-	0xdb, 0x4a, 0xc9, 0x9b, 0x1a, 0xe3, 0x73, 0x90, 0xf5, 0x4e, 0x2d, 0x9f, 0x3c, 0x93, 0x8a, 0x3c,
-	0x43, 0x82, 0x51, 0xf9, 0xcf, 0x59, 0x58, 0x3b, 0x4b, 0x8a, 0xdd, 0x85, 0x4c, 0x9f, 0xcd, 0x52,
-	0x4a, 0x7e, 0x93, 0x18, 0x08, 0x9d, 0xe9, 0x20, 0x66, 0xbf, 0x65, 0x10, 0xab, 0x50, 0xb0, 0xa8,
-	0xe7, 0x53, 0x5d, 0x64, 0x44, 0xea, 0x8c, 0x39, 0x05, 0x42, 0x69, 0x3e, 0xa5, 0xd2, 0xdf, 0x2a,
-	0xa5, 0x1e, 0xc0, 0x5a, 0xe4, 0x92, 0xea, 0x12, 0x6b, 0x10, 0xe6, 0xe6, 0xf5, 0x57, 0x79, 0x52,
-	0x91, 0x43, 0x3d, 0x85, 0xa9, 0x29, 0x25, 0x3a, 0x35, 0xc6, 0x75, 0x00, 0xdb, 0xa2, 0x76, 0x5f,
-	0xd5, 0xa9, 0x66, 0x4a, 0xb9, 0x25, 0x51, 0x6a, 0x33, 0xca, 0x5c, 0x94, 0x6c, 0x81, 0x6a, 0x26,
-	0xfe, 0x74, 0x92, 0x6a, 0x2b, 0x4b, 0x32, 0xe5, 0x48, 0x6c, 0xb2, 0xb9, 0x6c, 0x3b, 0x86, 0x92,
-	0x4b, 0x59, 0xde, 0x53, 0x3d, 0x98, 0x59, 0x9e, 0x3b, 0x51, 0x79, 0xe5, 0xcc, 0x94, 0x40, 0x4d,
-	0x4c, 0x6c, 0xd5, 0x8d, 0x0f, 0xf1, 0x07, 0x10, 0x01, 0x2a, 0x4f, 0x2b, 0xe0, 0x55, 0xa8, 0x18,
-	0x82, 0x2d, 0x32, 0xa2, 0x5b, 0xcf, 0xa1, 0x34, 0x1d, 0x1e, 0xbc, 0x09, 0x19, 0xcf, 0x27, 0xae,
-	0xcf, 0xb3, 0x30, 0xa3, 0x88, 0x01, 0x46, 0x90, 0xa2, 0x96, 0xce, 0xab, 0x5c, 0x46, 0x61, 0xff,
-	0xe2, 0x1f, 0x4d, 0x26, 0x9c, 0xe2, 0x13, 0xfe, 0x78, 0x7e, 0x45, 0xa7, 0x2c, 0xcf, 0xce, 0x7b,
-	0xeb, 0x36, 0xac, 0x4e, 0x4d, 0xe0, 0xac, 0x9f, 0x2e, 0xff, 0x02, 0xde, 0x5e, 0x68, 0x1a, 0x3f,
-	0x80, 0xcd, 0xb1, 0x65, 0x58, 0x3e, 0x75, 0x1d, 0x97, 0xb2, 0x8c, 0x15, 0x9f, 0x92, 0xfe, 0xb3,
-	0xb2, 0x24, 0xe7, 0x8e, 0xe3, 0x6c, 0x61, 0x45, 0xd9, 0x18, 0xcf, 0x83, 0x57, 0xf3, 0xb9, 0xff,
-	0xae, 0xa0, 0x17, 0x2f, 0x5e, 0xbc, 0x48, 0x96, 0x7f, 0x9d, 0x85, 0xcd, 0x45, 0x7b, 0x66, 0xe1,
-	0xf6, 0x3d, 0x07, 0x59, 0x6b, 0x3c, 0x3a, 0xa1, 0x2e, 0x0f, 0x52, 0x46, 0x09, 0x46, 0xb8, 0x0a,
-	0x19, 0x93, 0x9c, 0x50, 0x53, 0x4a, 0x6f, 0x27, 0x76, 0x4a, 0xbb, 0xd7, 0xce, 0xb4, 0x2b, 0x2b,
-	0x4d, 0xa6, 0xa2, 0x08, 0x4d, 0xfc, 0x19, 0xa4, 0x83, 0x12, 0xcd, 0x2c, 0x5c, 0x3d, 0x9b, 0x05,
-	0xb6, 0x97, 0x14, 0xae, 0x87, 0xdf, 0x81, 0x3c, 0xfb, 0x2b, 0x72, 0x23, 0xcb, 0x7d, 0xce, 0x31,
-	0x80, 0xe5, 0x05, 0xde, 0x82, 0x1c, 0xdf, 0x26, 0x3a, 0x0d, 0x8f, 0xb6, 0x68, 0xcc, 0x12, 0x4b,
-	0xa7, 0x7d, 0x32, 0x36, 0x7d, 0xf5, 0x09, 0x31, 0xc7, 0x94, 0x27, 0x7c, 0x5e, 0x29, 0x06, 0xe0,
-	0x4f, 0x19, 0x86, 0x2f, 0x42, 0x41, 0xec, 0x2a, 0xc3, 0xd2, 0xe9, 0x33, 0x5e, 0x3d, 0x33, 0x8a,
-	0xd8, 0x68, 0x0d, 0x86, 0xb0, 0xcf, 0x3f, 0xf2, 0x6c, 0x2b, 0x4c, 0x4d, 0xfe, 0x09, 0x06, 0xf0,
-	0xcf, 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x9e, 0xde, 0x6c, 0x4e, 0x95, 0xff, 0x94, 0x84, 0x34,
-	0xaf, 0x17, 0x6b, 0x50, 0xe8, 0x3d, 0xec, 0xc8, 0x6a, 0xbd, 0x7d, 0x7c, 0xd0, 0x94, 0x51, 0x02,
-	0x97, 0x00, 0x38, 0x70, 0xaf, 0xd9, 0xae, 0xf6, 0x50, 0x32, 0x1a, 0x37, 0x5a, 0xbd, 0xbd, 0x9b,
-	0x28, 0x15, 0x29, 0x1c, 0x0b, 0x20, 0x1d, 0x27, 0xdc, 0xd8, 0x45, 0x19, 0x8c, 0xa0, 0x28, 0x0c,
-	0x34, 0x1e, 0xc8, 0xf5, 0xbd, 0x9b, 0x28, 0x3b, 0x8d, 0xdc, 0xd8, 0x45, 0x2b, 0x78, 0x15, 0xf2,
-	0x1c, 0x39, 0x68, 0xb7, 0x9b, 0x28, 0x17, 0xd9, 0xec, 0xf6, 0x94, 0x46, 0xeb, 0x10, 0xe5, 0x23,
-	0x9b, 0x87, 0x4a, 0xfb, 0xb8, 0x83, 0x20, 0xb2, 0x70, 0x24, 0x77, 0xbb, 0xd5, 0x43, 0x19, 0x15,
-	0x22, 0xc6, 0xc1, 0xc3, 0x9e, 0xdc, 0x45, 0xc5, 0x29, 0xb7, 0x6e, 0xec, 0xa2, 0xd5, 0xe8, 0x13,
-	0x72, 0xeb, 0xf8, 0x08, 0x95, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84, 0x4e, 0xac, 0xcd, 0x40, 0x7b,
-	0x37, 0x11, 0x9a, 0x38, 0x22, 0xac, 0xac, 0x4f, 0x01, 0x7b, 0x37, 0x11, 0x2e, 0xd7, 0x20, 0xc3,
-	0xb3, 0x0b, 0x63, 0x28, 0x35, 0xab, 0x07, 0x72, 0x53, 0x6d, 0x77, 0x7a, 0x8d, 0x76, 0xab, 0xda,
-	0x44, 0x89, 0x09, 0xa6, 0xc8, 0x3f, 0x39, 0x6e, 0x28, 0x72, 0x1d, 0x25, 0xe3, 0x58, 0x47, 0xae,
-	0xf6, 0xe4, 0x3a, 0x4a, 0x95, 0x35, 0xd8, 0x5c, 0x54, 0x27, 0x17, 0xee, 0x8c, 0xd8, 0x12, 0x27,
-	0x97, 0x2c, 0x31, 0xb7, 0x35, 0xb7, 0xc4, 0xff, 0x4a, 0xc2, 0xc6, 0x82, 0xb3, 0x62, 0xe1, 0x47,
-	0x7e, 0x08, 0x19, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb2, 0xf0, 0xd0, 0xe1, 0x09, 0x3b, 0x77, 0x82,
-	0x72, 0xbd, 0x78, 0x07, 0x91, 0x5a, 0xd2, 0x41, 0x30, 0x13, 0x73, 0x35, 0xfd, 0xe7, 0x73, 0x35,
-	0x5d, 0x1c, 0x7b, 0x7b, 0x67, 0x39, 0xf6, 0x38, 0xf6, 0xcd, 0x6a, 0x7b, 0x66, 0x41, 0x6d, 0xbf,
-	0x0b, 0xeb, 0x73, 0x86, 0xce, 0x5c, 0x63, 0x7f, 0x99, 0x00, 0x69, 0x59, 0x70, 0x5e, 0x51, 0xe9,
-	0x92, 0x53, 0x95, 0xee, 0xee, 0x6c, 0x04, 0x2f, 0x2d, 0x5f, 0x84, 0xb9, 0xb5, 0xfe, 0x3a, 0x01,
-	0xe7, 0x16, 0x77, 0x8a, 0x0b, 0x7d, 0xf8, 0x0c, 0xb2, 0x23, 0xea, 0x0f, 0xed, 0xb0, 0x5b, 0xfa,
-	0x78, 0xc1, 0x19, 0xcc, 0xc4, 0xb3, 0x8b, 0x1d, 0x68, 0xc5, 0x0f, 0xf1, 0xd4, 0xb2, 0x76, 0x4f,
-	0x78, 0x33, 0xe7, 0xe9, 0xaf, 0x92, 0xf0, 0xf6, 0x42, 0xe3, 0x0b, 0x1d, 0x7d, 0x0f, 0xc0, 0xb0,
-	0x9c, 0xb1, 0x2f, 0x3a, 0x22, 0x51, 0x60, 0xf3, 0x1c, 0xe1, 0xc5, 0x8b, 0x15, 0xcf, 0xb1, 0x1f,
-	0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x3b, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0xf7, 0x97, 0xcc,
-	0x74, 0x2e, 0x31, 0x3f, 0x01, 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x46, 0x86,
-	0x35, 0xe0, 0x27, 0x48, 0x6e, 0x3f, 0xd3, 0x27, 0xa6, 0x47, 0x95, 0x35, 0x21, 0xee, 0x86, 0x52,
-	0xa6, 0xc1, 0x13, 0xc8, 0x8d, 0x69, 0x64, 0xa7, 0x34, 0x84, 0x38, 0xd2, 0x28, 0xff, 0x31, 0x07,
-	0x85, 0x58, 0x5f, 0x8d, 0x2f, 0x41, 0xf1, 0x11, 0x79, 0x42, 0xd4, 0xf0, 0xae, 0x24, 0x22, 0x51,
-	0x60, 0x58, 0x27, 0xb8, 0x2f, 0x7d, 0x02, 0x9b, 0x9c, 0x62, 0x8f, 0x7d, 0xea, 0xaa, 0x9a, 0x49,
-	0x3c, 0x8f, 0x07, 0x2d, 0xc7, 0xa9, 0x98, 0xc9, 0xda, 0x4c, 0x54, 0x0b, 0x25, 0xf8, 0x16, 0x6c,
-	0x70, 0x8d, 0xd1, 0xd8, 0xf4, 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xbd, 0x79, 0xfc, 0x24, 0x89, 0x3c,
-	0x5b, 0x67, 0x8c, 0xa3, 0x80, 0xc0, 0x3c, 0xf2, 0x70, 0x1d, 0xde, 0xe3, 0x6a, 0x03, 0x6a, 0x51,
-	0x97, 0xf8, 0x54, 0xa5, 0x5f, 0x8d, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x21, 0xf1, 0x86, 0xd2,
-	0x26, 0x33, 0x70, 0x90, 0x94, 0x12, 0xca, 0x05, 0x46, 0x3c, 0x0c, 0x78, 0x32, 0xa7, 0x55, 0x2d,
-	0xfd, 0x73, 0xe2, 0x0d, 0xf1, 0x3e, 0x9c, 0xe3, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x81, 0xaa, 0x0d,
-	0xa9, 0xf6, 0x58, 0x1d, 0xfb, 0xfd, 0x3b, 0xd2, 0x3b, 0xf1, 0xef, 0x73, 0x0f, 0xbb, 0x9c, 0x53,
-	0x63, 0x94, 0x63, 0xbf, 0x7f, 0x07, 0x77, 0xa1, 0xc8, 0x16, 0x63, 0x64, 0x3c, 0xa7, 0x6a, 0xdf,
-	0x76, 0xf9, 0xd1, 0x58, 0x5a, 0x50, 0x9a, 0x62, 0x11, 0xac, 0xb4, 0x03, 0x85, 0x23, 0x5b, 0xa7,
-	0xfb, 0x99, 0x6e, 0x47, 0x96, 0xeb, 0x4a, 0x21, 0xb4, 0x72, 0xcf, 0x76, 0x59, 0x42, 0x0d, 0xec,
-	0x28, 0xc0, 0x05, 0x91, 0x50, 0x03, 0x3b, 0x0c, 0xef, 0x2d, 0xd8, 0xd0, 0x34, 0x31, 0x67, 0x43,
-	0x53, 0x83, 0x3b, 0x96, 0x27, 0xa1, 0xa9, 0x60, 0x69, 0xda, 0xa1, 0x20, 0x04, 0x39, 0xee, 0xe1,
-	0x4f, 0xe1, 0xed, 0x49, 0xb0, 0xe2, 0x8a, 0xeb, 0x73, 0xb3, 0x9c, 0x55, 0xbd, 0x05, 0x1b, 0xce,
-	0xe9, 0xbc, 0x22, 0x9e, 0xfa, 0xa2, 0x73, 0x3a, 0xab, 0x76, 0x1b, 0x36, 0x9d, 0xa1, 0x33, 0xaf,
-	0x77, 0x35, 0xae, 0x87, 0x9d, 0xa1, 0x33, 0xab, 0xf8, 0x11, 0xbf, 0x70, 0xbb, 0x54, 0x23, 0x3e,
-	0xd5, 0xa5, 0xf3, 0x71, 0x7a, 0x4c, 0x80, 0xaf, 0x03, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4,
-	0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9, 0x62, 0x9c, 0x5c, 0xd2, 0x34, 0x99, 0x4b, 0xab, 0x5c, 0x88,
-	0xaf, 0xc2, 0xba, 0x7d, 0xf2, 0x48, 0x13, 0x29, 0xa9, 0x3a, 0x2e, 0xed, 0x1b, 0xcf, 0xa4, 0x0f,
-	0x79, 0x7c, 0xd7, 0x98, 0x80, 0x27, 0x64, 0x87, 0xc3, 0xf8, 0x0a, 0x20, 0xcd, 0x1b, 0x12, 0xd7,
-	0xe1, 0x35, 0xd9, 0x73, 0x88, 0x46, 0xa5, 0x8f, 0x04, 0x55, 0xe0, 0xad, 0x10, 0x66, 0x5b, 0xc2,
-	0x7b, 0x6a, 0xf4, 0xfd, 0xd0, 0xe2, 0x65, 0xb1, 0x25, 0x38, 0x16, 0x58, 0xdb, 0x01, 0xc4, 0x42,
-	0x31, 0xf5, 0xe1, 0x1d, 0x4e, 0x2b, 0x39, 0x43, 0x27, 0xfe, 0xdd, 0x0f, 0x60, 0x95, 0x31, 0x27,
-	0x1f, 0xbd, 0x22, 0x1a, 0x32, 0x67, 0x18, 0xfb, 0xe2, 0x6b, 0xeb, 0x8d, 0xcb, 0xfb, 0x50, 0x8c,
-	0xe7, 0x27, 0xce, 0x83, 0xc8, 0x50, 0x94, 0x60, 0xcd, 0x4a, 0xad, 0x5d, 0x67, 0x6d, 0xc6, 0x97,
-	0x32, 0x4a, 0xb2, 0x76, 0xa7, 0xd9, 0xe8, 0xc9, 0xaa, 0x72, 0xdc, 0xea, 0x35, 0x8e, 0x64, 0x94,
-	0x8a, 0xf7, 0xd5, 0x7f, 0x4d, 0x42, 0x69, 0xfa, 0x8a, 0x84, 0x7f, 0x00, 0xe7, 0xc3, 0xf7, 0x0c,
-	0x8f, 0xfa, 0xea, 0x53, 0xc3, 0xe5, 0x5b, 0x66, 0x44, 0xc4, 0xf1, 0x15, 0x2d, 0xda, 0x66, 0xc0,
-	0xea, 0x52, 0xff, 0x0b, 0xc3, 0x65, 0x1b, 0x62, 0x44, 0x7c, 0xdc, 0x84, 0x8b, 0x96, 0xad, 0x7a,
-	0x3e, 0xb1, 0x74, 0xe2, 0xea, 0xea, 0xe4, 0x25, 0x49, 0x25, 0x9a, 0x46, 0x3d, 0xcf, 0x16, 0x47,
-	0x55, 0x64, 0xe5, 0x5d, 0xcb, 0xee, 0x06, 0xe4, 0x49, 0x0d, 0xaf, 0x06, 0xd4, 0x99, 0x04, 0x4b,
-	0x2d, 0x4b, 0xb0, 0x77, 0x20, 0x3f, 0x22, 0x8e, 0x4a, 0x2d, 0xdf, 0x3d, 0xe5, 0x8d, 0x71, 0x4e,
-	0xc9, 0x8d, 0x88, 0x23, 0xb3, 0xf1, 0x9b, 0xb9, 0x9f, 0xfc, 0x23, 0x05, 0xc5, 0x78, 0x73, 0xcc,
-	0xee, 0x1a, 0x1a, 0x3f, 0x47, 0x12, 0xbc, 0xd2, 0x7c, 0xf0, 0xd2, 0x56, 0xba, 0x52, 0x63, 0x07,
-	0xcc, 0x7e, 0x56, 0xb4, 0xac, 0x8a, 0xd0, 0x64, 0x87, 0x3b, 0xab, 0x2d, 0x54, 0xb4, 0x08, 0x39,
-	0x25, 0x18, 0xe1, 0x43, 0xc8, 0x3e, 0xf2, 0xb8, 0xed, 0x2c, 0xb7, 0xfd, 0xe1, 0xcb, 0x6d, 0xdf,
-	0xef, 0x72, 0xe3, 0xf9, 0xfb, 0x5d, 0xb5, 0xd5, 0x56, 0x8e, 0xaa, 0x4d, 0x25, 0x50, 0xc7, 0x17,
-	0x20, 0x6d, 0x92, 0xe7, 0xa7, 0xd3, 0x47, 0x11, 0x87, 0xce, 0x1a, 0xf8, 0x0b, 0x90, 0x7e, 0x4a,
-	0xc9, 0xe3, 0xe9, 0x03, 0x80, 0x43, 0xaf, 0x31, 0xf5, 0xaf, 0x43, 0x86, 0xc7, 0x0b, 0x03, 0x04,
-	0x11, 0x43, 0x6f, 0xe1, 0x1c, 0xa4, 0x6b, 0x6d, 0x85, 0xa5, 0x3f, 0x82, 0xa2, 0x40, 0xd5, 0x4e,
-	0x43, 0xae, 0xc9, 0x28, 0x59, 0xbe, 0x05, 0x59, 0x11, 0x04, 0xb6, 0x35, 0xa2, 0x30, 0xa0, 0xb7,
-	0x82, 0x61, 0x60, 0x23, 0x11, 0x4a, 0x8f, 0x8f, 0x0e, 0x64, 0x05, 0x25, 0xe3, 0xcb, 0xeb, 0x41,
-	0x31, 0xde, 0x17, 0xbf, 0x99, 0x9c, 0xfa, 0x4b, 0x02, 0x0a, 0xb1, 0x3e, 0x97, 0x35, 0x28, 0xc4,
-	0x34, 0xed, 0xa7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0x49, 0x01, 0x1c, 0xaa, 0x32, 0xe4, 0xac, 0x8b,
-	0xf6, 0x46, 0x9c, 0xff, 0x5d, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x3b, 0x75, 0xf0,
-	0xb7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x4e, 0xdd, 0xfb, 0x67, 0x12, 0x56,
-	0xa7, 0xba, 0xc9, 0xb3, 0x7a, 0xf7, 0x15, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed,
-	0x54, 0x35, 0xe9, 0x13, 0x6a, 0x4a, 0x65, 0x5e, 0x28, 0xae, 0xbf, 0xbc, 0x5f, 0xad, 0x34, 0x26,
-	0x7a, 0x4d, 0xa6, 0xb6, 0xbf, 0xd1, 0xa8, 0xcb, 0x47, 0x9d, 0x76, 0x4f, 0x6e, 0xd5, 0x1e, 0xaa,
-	0xc7, 0xad, 0x1f, 0xb7, 0xda, 0x5f, 0xb4, 0x14, 0x64, 0xcc, 0xd0, 0x5e, 0xe3, 0x56, 0xef, 0x00,
-	0x9a, 0x75, 0x0a, 0x9f, 0x87, 0x45, 0x6e, 0xa1, 0xb7, 0xf0, 0x06, 0xac, 0xb5, 0xda, 0x6a, 0xb7,
-	0x51, 0x97, 0x55, 0xf9, 0xde, 0x3d, 0xb9, 0xd6, 0xeb, 0x8a, 0x17, 0x88, 0x88, 0xdd, 0x9b, 0xde,
-	0xd4, 0xbf, 0x49, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0xf7, 0xcf,
-	0xe2, 0x7d, 0x85, 0x1d, 0xf9, 0x1d, 0xe2, 0xfa, 0xc1, 0x55, 0xe3, 0x0a, 0xb0, 0x28, 0x59, 0xbe,
-	0xd1, 0x37, 0xa8, 0x1b, 0x3c, 0xd8, 0x88, 0x0b, 0xc5, 0xda, 0x04, 0x17, 0x6f, 0x36, 0xdf, 0x03,
-	0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0x4f, 0xa8, 0x6a, 0x58, 0xe1, 0xeb, 0x0e, 0xbb, 0x60, 0xa4, 0x15,
-	0x14, 0x4a, 0x1a, 0x96, 0x1f, 0xb1, 0x2d, 0x3a, 0x20, 0x33, 0x6c, 0x56, 0xc0, 0x53, 0x0a, 0x0a,
-	0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xe7, 0x45, 0x42, 0x29,
-	0x08, 0x2c, 0xa2, 0x04, 0xfd, 0xf4, 0xe4, 0x59, 0xa9, 0xa8, 0x14, 0x04, 0x26, 0x28, 0x97, 0x61,
-	0x8d, 0x0c, 0x06, 0x2e, 0x33, 0x1e, 0x1a, 0x12, 0x37, 0x84, 0x52, 0x04, 0x73, 0xe2, 0xd6, 0x7d,
-	0xc8, 0x85, 0x71, 0x60, 0x47, 0x32, 0x8b, 0x84, 0xea, 0x88, 0x6b, 0x6f, 0x72, 0x27, 0xaf, 0xe4,
-	0xac, 0x50, 0x78, 0x09, 0x8a, 0x86, 0xa7, 0x4e, 0x5e, 0xc9, 0x93, 0xdb, 0xc9, 0x9d, 0x9c, 0x52,
-	0x30, 0xbc, 0xe8, 0x85, 0xb1, 0xfc, 0x75, 0x12, 0x4a, 0xd3, 0xaf, 0xfc, 0xb8, 0x0e, 0x39, 0xd3,
-	0xd6, 0x08, 0x4f, 0x2d, 0xf1, 0x13, 0xd3, 0xce, 0x2b, 0x7e, 0x18, 0xa8, 0x34, 0x03, 0xbe, 0x12,
-	0x69, 0x6e, 0xfd, 0x2d, 0x01, 0xb9, 0x10, 0xc6, 0xe7, 0x20, 0xed, 0x10, 0x7f, 0xc8, 0xcd, 0x65,
-	0x0e, 0x92, 0x28, 0xa1, 0xf0, 0x31, 0xc3, 0x3d, 0x87, 0x58, 0x3c, 0x05, 0x02, 0x9c, 0x8d, 0xd9,
-	0xba, 0x9a, 0x94, 0xe8, 0xfc, 0xfa, 0x61, 0x8f, 0x46, 0xd4, 0xf2, 0xbd, 0x70, 0x5d, 0x03, 0xbc,
-	0x16, 0xc0, 0xf8, 0x1a, 0xac, 0xfb, 0x2e, 0x31, 0xcc, 0x29, 0x6e, 0x9a, 0x73, 0x51, 0x28, 0x88,
-	0xc8, 0xfb, 0x70, 0x21, 0xb4, 0xab, 0x53, 0x9f, 0x68, 0x43, 0xaa, 0x4f, 0x94, 0xb2, 0xfc, 0x99,
-	0xe1, 0x7c, 0x40, 0xa8, 0x07, 0xf2, 0x50, 0xb7, 0xfc, 0xf7, 0x04, 0xac, 0x87, 0x17, 0x26, 0x3d,
-	0x0a, 0xd6, 0x11, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, 0x6b, 0x3e, 0x95, 0xe7, 0xf4, 0x2a, 0xd5,
-	0x48, 0x49, 0x89, 0x19, 0xd8, 0x1a, 0x01, 0x4c, 0x24, 0x4b, 0xc3, 0x76, 0x11, 0x0a, 0xc1, 0x4f,
-	0x38, 0xfc, 0x77, 0x40, 0x71, 0xc5, 0x06, 0x01, 0xb1, 0x9b, 0x15, 0xde, 0x84, 0xcc, 0x09, 0x1d,
-	0x18, 0x56, 0xf0, 0x30, 0x2b, 0x06, 0xe1, 0x43, 0x48, 0x3a, 0x7a, 0x08, 0x39, 0xf8, 0x19, 0x6c,
-	0x68, 0xf6, 0x68, 0xd6, 0xdd, 0x03, 0x34, 0x73, 0xcd, 0xf7, 0x3e, 0x4f, 0x7c, 0x09, 0x93, 0x16,
-	0xf3, 0x7f, 0x89, 0xc4, 0xef, 0x93, 0xa9, 0xc3, 0xce, 0xc1, 0x1f, 0x92, 0x5b, 0x87, 0x42, 0xb5,
-	0x13, 0xce, 0x54, 0xa1, 0x7d, 0x93, 0x6a, 0xcc, 0xfb, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3,
-	0x58, 0x22, 0x30, 0xdf, 0x1c, 0x00, 0x00,
+	0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80,
+	0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66,
+	0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f,
+	0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63,
+	0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e,
+	0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec,
+	0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2,
+	0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e,
+	0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2,
+	0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39,
+	0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd,
+	0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41,
+	0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22,
+	0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa,
+	0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 0x8c, 0x67, 0xbd, 0xe4,
+	0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7,
+	0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d,
+	0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e,
+	0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12,
+	0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d,
+	0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2,
+	0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1,
+	0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba,
+	0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60,
+	0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77,
+	0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24,
+	0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06,
+	0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a,
+	0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92,
+	0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6,
+	0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c,
+	0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7,
+	0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f,
+	0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd,
+	0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07,
+	0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95,
+	0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77,
+	0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e,
+	0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8,
+	0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69,
+	0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0,
+	0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05,
+	0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46,
+	0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f,
+	0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c,
+	0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3,
+	0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5,
+	0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95,
+	0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a,
+	0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07,
+	0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2,
+	0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f,
+	0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42,
+	0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e,
+	0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4,
+	0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90,
+	0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae,
+	0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d,
+	0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e,
+	0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58,
+	0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9,
+	0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f,
+	0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4,
+	0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15,
+	0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf,
+	0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba,
+	0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6,
+	0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01,
+	0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73,
+	0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb,
+	0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1,
+	0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7,
+	0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f,
+	0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78,
+	0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a,
+	0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba,
+	0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49,
+	0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48,
+	0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee,
+	0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0,
+	0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a,
+	0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63,
+	0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2,
+	0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59,
+	0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35,
+	0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd,
+	0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee,
+	0xa2, 0xcf, 0xe0, 0xed, 0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b,
+	0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf,
+	0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8,
+	0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31,
+	0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53,
+	0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8,
+	0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8,
+	0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d,
+	0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81,
+	0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8,
+	0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f,
+	0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9,
+	0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03,
+	0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff,
+	0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d,
+	0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0,
+	0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8,
+	0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4,
+	0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a,
+	0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86,
+	0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71,
+	0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76,
+	0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35,
+	0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b,
+	0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7,
+	0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e,
+	0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd,
+	0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01,
+	0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55,
+	0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41,
+	0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79,
+	0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7,
+	0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c,
+	0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd,
+	0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99,
+	0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88,
+	0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95,
+	0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed,
+	0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea,
+	0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d,
+	0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee,
+	0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4,
+	0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25,
+	0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0,
+	0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97,
+	0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94,
+	0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22,
+	0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43,
+	0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80,
+	0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd,
+	0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77,
+	0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75,
+	0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4,
+	0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11,
+	0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb,
+	0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c,
+	0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0,
+	0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d,
+	0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07,
+	0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39,
+	0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80,
+	0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42,
+	0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c,
+	0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8,
+	0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7,
+	0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00,
+	0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00,
 }
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
index ec6eb16..165b211 100644
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
@@ -3,14 +3,16 @@
 
 package descriptor
 
-import fmt "fmt"
-import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
-import reflect "reflect"
-import proto "github.com/gogo/protobuf/proto"
-import math "math"
+import (
+	fmt "fmt"
+	github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+	proto "github.com/gogo/protobuf/proto"
+	math "math"
+	reflect "reflect"
+	sort "sort"
+	strconv "strconv"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -358,7 +360,7 @@
 	if this == nil {
 		return "nil"
 	}
-	s := make([]string, 0, 23)
+	s := make([]string, 0, 25)
 	s = append(s, "&descriptor.FileOptions{")
 	if this.JavaPackage != nil {
 		s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
@@ -414,6 +416,12 @@
 	if this.PhpNamespace != nil {
 		s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
 	}
+	if this.PhpMetadataNamespace != nil {
+		s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n")
+	}
+	if this.RubyPackage != nil {
+		s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n")
+	}
 	if this.UninterpretedOption != nil {
 		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
 	}
diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go
index cb774db..202fe8e 100644
--- a/vendor/github.com/gogo/protobuf/types/any.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/any.pb.go
@@ -3,16 +3,15 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -107,7 +106,8 @@
 //
 type Any struct {
 	// A URL/resource name that uniquely identifies the type of the serialized
-	// protocol buffer message. The last segment of the URL's path must represent
+	// protocol buffer message. This string must contain at least
+	// one "/" character. The last segment of the URL's path must represent
 	// the fully qualified name of the type (as in
 	// `path/google.protobuf.Duration`). The name should be in a canonical form
 	// (e.g., leading "." is not accepted).
@@ -144,7 +144,7 @@
 func (m *Any) Reset()      { *m = Any{} }
 func (*Any) ProtoMessage() {}
 func (*Any) Descriptor() ([]byte, []int) {
-	return fileDescriptor_any_f098d1a3c592d16a, []int{0}
+	return fileDescriptor_b53526c13ae22eb4, []int{0}
 }
 func (*Any) XXX_WellKnownType() string { return "Any" }
 func (m *Any) XXX_Unmarshal(b []byte) error {
@@ -162,8 +162,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Any) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Any.Merge(dst, src)
+func (m *Any) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Any.Merge(m, src)
 }
 func (m *Any) XXX_Size() int {
 	return m.Size()
@@ -194,6 +194,27 @@
 func init() {
 	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
 }
+
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
+
+var fileDescriptor_b53526c13ae22eb4 = []byte{
+	// 211 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e,
+	0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24,
+	0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78,
+	0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4,
+	0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28,
+	0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94,
+	0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94,
+	0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed,
+	0x00, 0x00, 0x00,
+}
+
 func (this *Any) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -481,7 +502,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -509,7 +530,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -519,6 +540,9 @@
 				return ErrInvalidLengthAny
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAny
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -538,7 +562,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -547,6 +571,9 @@
 				return ErrInvalidLengthAny
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAny
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -564,6 +591,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthAny
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthAny
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -631,10 +661,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthAny
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthAny
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -663,6 +696,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthAny
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -681,23 +717,3 @@
 	ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowAny   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_f098d1a3c592d16a) }
-
-var fileDescriptor_any_f098d1a3c592d16a = []byte{
-	// 211 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
-	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
-	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
-	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
-	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e,
-	0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24,
-	0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78,
-	0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4,
-	0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28,
-	0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94,
-	0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94,
-	0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed,
-	0x00, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/types/api.pb.go b/vendor/github.com/gogo/protobuf/types/api.pb.go
index 420fa55..fe0eefd 100644
--- a/vendor/github.com/gogo/protobuf/types/api.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/api.pb.go
@@ -3,16 +3,15 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -79,7 +78,7 @@
 func (m *Api) Reset()      { *m = Api{} }
 func (*Api) ProtoMessage() {}
 func (*Api) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_a4406062c749da1f, []int{0}
+	return fileDescriptor_a2ec32096296c143, []int{0}
 }
 func (m *Api) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -96,8 +95,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Api) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Api.Merge(dst, src)
+func (m *Api) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Api.Merge(m, src)
 }
 func (m *Api) XXX_Size() int {
 	return m.Size()
@@ -185,7 +184,7 @@
 func (m *Method) Reset()      { *m = Method{} }
 func (*Method) ProtoMessage() {}
 func (*Method) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_a4406062c749da1f, []int{1}
+	return fileDescriptor_a2ec32096296c143, []int{1}
 }
 func (m *Method) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -202,8 +201,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Method) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Method.Merge(dst, src)
+func (m *Method) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Method.Merge(m, src)
 }
 func (m *Method) XXX_Size() int {
 	return m.Size()
@@ -359,7 +358,7 @@
 func (m *Mixin) Reset()      { *m = Mixin{} }
 func (*Mixin) ProtoMessage() {}
 func (*Mixin) Descriptor() ([]byte, []int) {
-	return fileDescriptor_api_a4406062c749da1f, []int{2}
+	return fileDescriptor_a2ec32096296c143, []int{2}
 }
 func (m *Mixin) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -376,8 +375,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Mixin) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Mixin.Merge(dst, src)
+func (m *Mixin) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Mixin.Merge(m, src)
 }
 func (m *Mixin) XXX_Size() int {
 	return m.Size()
@@ -410,6 +409,43 @@
 	proto.RegisterType((*Method)(nil), "google.protobuf.Method")
 	proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin")
 }
+
+func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_a2ec32096296c143) }
+
+var fileDescriptor_a2ec32096296c143 = []byte{
+	// 467 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31,
+	0x14, 0xc7, 0xeb, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3,
+	0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a,
+	0x83, 0x09, 0x96, 0xee, 0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80,
+	0xad, 0x23, 0x23, 0x23, 0xb9, 0x2e, 0x8c, 0x1d, 0x19, 0x91, 0x7d, 0xe7, 0xa6, 0x5c, 0x83, 0x04,
+	0x9b, 0xdf, 0xfb, 0xff, 0xfc, 0xf7, 0x7b, 0x7f, 0xc3, 0x9b, 0x33, 0x21, 0x66, 0x39, 0xed, 0x4b,
+	0x25, 0x8c, 0x38, 0x9a, 0xbf, 0xea, 0x4f, 0x24, 0x4b, 0x5d, 0x81, 0x76, 0x2b, 0x29, 0xf5, 0x52,
+	0xef, 0x56, 0x93, 0xd5, 0x62, 0xae, 0xa6, 0x74, 0x3c, 0x15, 0xdc, 0xd0, 0x85, 0xa9, 0xc0, 0x5e,
+	0xaf, 0x49, 0x99, 0xa5, 0xac, 0x4d, 0xf6, 0xbe, 0x06, 0x30, 0x3c, 0x90, 0x0c, 0x21, 0xd8, 0xe2,
+	0x93, 0x82, 0x62, 0x10, 0x83, 0xe4, 0x52, 0xe6, 0xce, 0xe8, 0x1e, 0xec, 0x14, 0xd4, 0xbc, 0x16,
+	0x2f, 0x35, 0x0e, 0xe2, 0x30, 0xd9, 0xd9, 0xbf, 0x91, 0x36, 0x06, 0x48, 0x1f, 0x3b, 0x3d, 0xf3,
+	0x9c, 0xbd, 0x22, 0xa4, 0x61, 0x82, 0x6b, 0x1c, 0xfe, 0xe5, 0xca, 0x13, 0xa7, 0x67, 0x9e, 0x43,
+	0x18, 0x76, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0xb8, 0xe5, 0x1e, 0xf7, 0x25, 0x7a, 0x08, 0xbb, 0x7f,
+	0xee, 0x83, 0xdb, 0x31, 0x48, 0x76, 0xf6, 0xc9, 0x05, 0xcf, 0x91, 0xc3, 0x1e, 0x54, 0x54, 0x76,
+	0x59, 0x9f, 0x2f, 0x51, 0x0a, 0xa3, 0x82, 0x2d, 0x18, 0xd7, 0x38, 0x72, 0x23, 0x5d, 0xbf, 0xb8,
+	0x85, 0x95, 0xb3, 0x9a, 0x42, 0x7d, 0x18, 0xe9, 0x25, 0x37, 0x93, 0x05, 0xee, 0xc4, 0x20, 0xe9,
+	0x6e, 0x58, 0x61, 0xe4, 0xe4, 0xac, 0xc6, 0xf6, 0xbe, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x18, 0x63,
+	0x02, 0xaf, 0x28, 0xfa, 0x66, 0x4e, 0xb5, 0x19, 0xdb, 0xe0, 0xc7, 0x73, 0x95, 0xe3, 0xc0, 0xe9,
+	0xdd, 0xba, 0xff, 0x74, 0x29, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74,
+	0x52, 0x30, 0x3e, 0xc3, 0x61, 0x0c, 0x92, 0xed, 0xcc, 0x5b, 0x8c, 0x7c, 0x1f, 0xdd, 0xb6, 0xb0,
+	0x96, 0x82, 0x6b, 0xba, 0xf6, 0xad, 0x12, 0xdc, 0xf5, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb,
+	0x76, 0x6e, 0x3b, 0xe7, 0x33, 0x97, 0xb5, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf1, 0x17, 0xff, 0x3b,
+	0xb4, 0x3e, 0x6c, 0xbb, 0xd8, 0x37, 0x46, 0x86, 0x60, 0x4b, 0x09, 0x61, 0xea, 0x98, 0xdc, 0x79,
+	0xf0, 0xfe, 0xfb, 0x8a, 0x6c, 0x9d, 0xae, 0x08, 0xf8, 0xb5, 0x22, 0xe0, 0x43, 0x49, 0xc0, 0xa7,
+	0x92, 0x80, 0xe3, 0x92, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0x7e, 0x96, 0x64, 0xeb, 0xd4,
+	0xf6, 0x4f, 0x08, 0x38, 0x3e, 0x21, 0x00, 0x5e, 0x9b, 0x8a, 0xa2, 0x39, 0xc6, 0x60, 0xfb, 0x40,
+	0xb2, 0xa1, 0x2d, 0x86, 0xe0, 0x45, 0xdb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xe1, 0x70, 0xf0, 0x39,
+	0x20, 0x87, 0x15, 0x3a, 0xf4, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8,
+	0x8f, 0x22, 0xe7, 0x71, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x64, 0x40, 0x40, 0xa1,
+	0x03, 0x00, 0x00,
+}
+
 func (this *Api) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -1349,7 +1385,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1377,7 +1413,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1387,6 +1423,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1406,7 +1445,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1415,6 +1454,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1437,7 +1479,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1446,6 +1488,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1468,7 +1513,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1478,6 +1523,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1497,7 +1545,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1506,6 +1554,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1530,7 +1581,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1539,6 +1590,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1561,7 +1615,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Syntax |= (Syntax(b) & 0x7F) << shift
+				m.Syntax |= Syntax(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1575,6 +1629,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthApi
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthApi
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1603,7 +1660,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1631,7 +1688,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1641,6 +1698,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1660,7 +1720,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1670,6 +1730,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1689,7 +1752,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1709,7 +1772,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1719,6 +1782,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1738,7 +1804,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1758,7 +1824,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1767,6 +1833,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1789,7 +1858,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Syntax |= (Syntax(b) & 0x7F) << shift
+				m.Syntax |= Syntax(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1803,6 +1872,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthApi
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthApi
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1831,7 +1903,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1859,7 +1931,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1869,6 +1941,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1888,7 +1963,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1898,6 +1973,9 @@
 				return ErrInvalidLengthApi
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthApi
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1912,6 +1990,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthApi
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthApi
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1979,10 +2060,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthApi
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthApi
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -2011,6 +2095,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthApi
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -2029,39 +2116,3 @@
 	ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowApi   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_api_a4406062c749da1f) }
-
-var fileDescriptor_api_a4406062c749da1f = []byte{
-	// 467 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31,
-	0x14, 0xc7, 0xeb, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3,
-	0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a,
-	0x83, 0x09, 0x96, 0xee, 0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80,
-	0xad, 0x23, 0x23, 0x23, 0xb9, 0x2e, 0x8c, 0x1d, 0x19, 0x91, 0x7d, 0xe7, 0xa6, 0x5c, 0x83, 0x04,
-	0x9b, 0xdf, 0xfb, 0xff, 0xfc, 0xf7, 0x7b, 0x7f, 0xc3, 0x9b, 0x33, 0x21, 0x66, 0x39, 0xed, 0x4b,
-	0x25, 0x8c, 0x38, 0x9a, 0xbf, 0xea, 0x4f, 0x24, 0x4b, 0x5d, 0x81, 0x76, 0x2b, 0x29, 0xf5, 0x52,
-	0xef, 0x56, 0x93, 0xd5, 0x62, 0xae, 0xa6, 0x74, 0x3c, 0x15, 0xdc, 0xd0, 0x85, 0xa9, 0xc0, 0x5e,
-	0xaf, 0x49, 0x99, 0xa5, 0xac, 0x4d, 0xf6, 0xbe, 0x06, 0x30, 0x3c, 0x90, 0x0c, 0x21, 0xd8, 0xe2,
-	0x93, 0x82, 0x62, 0x10, 0x83, 0xe4, 0x52, 0xe6, 0xce, 0xe8, 0x1e, 0xec, 0x14, 0xd4, 0xbc, 0x16,
-	0x2f, 0x35, 0x0e, 0xe2, 0x30, 0xd9, 0xd9, 0xbf, 0x91, 0x36, 0x06, 0x48, 0x1f, 0x3b, 0x3d, 0xf3,
-	0x9c, 0xbd, 0x22, 0xa4, 0x61, 0x82, 0x6b, 0x1c, 0xfe, 0xe5, 0xca, 0x13, 0xa7, 0x67, 0x9e, 0x43,
-	0x18, 0x76, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0xb8, 0xe5, 0x1e, 0xf7, 0x25, 0x7a, 0x08, 0xbb, 0x7f,
-	0xee, 0x83, 0xdb, 0x31, 0x48, 0x76, 0xf6, 0xc9, 0x05, 0xcf, 0x91, 0xc3, 0x1e, 0x54, 0x54, 0x76,
-	0x59, 0x9f, 0x2f, 0x51, 0x0a, 0xa3, 0x82, 0x2d, 0x18, 0xd7, 0x38, 0x72, 0x23, 0x5d, 0xbf, 0xb8,
-	0x85, 0x95, 0xb3, 0x9a, 0x42, 0x7d, 0x18, 0xe9, 0x25, 0x37, 0x93, 0x05, 0xee, 0xc4, 0x20, 0xe9,
-	0x6e, 0x58, 0x61, 0xe4, 0xe4, 0xac, 0xc6, 0xf6, 0xbe, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x18, 0x63,
-	0x02, 0xaf, 0x28, 0xfa, 0x66, 0x4e, 0xb5, 0x19, 0xdb, 0xe0, 0xc7, 0x73, 0x95, 0xe3, 0xc0, 0xe9,
-	0xdd, 0xba, 0xff, 0x74, 0x29, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74,
-	0x52, 0x30, 0x3e, 0xc3, 0x61, 0x0c, 0x92, 0xed, 0xcc, 0x5b, 0x8c, 0x7c, 0x1f, 0xdd, 0xb6, 0xb0,
-	0x96, 0x82, 0x6b, 0xba, 0xf6, 0xad, 0x12, 0xdc, 0xf5, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb,
-	0x76, 0x6e, 0x3b, 0xe7, 0x33, 0x97, 0xb5, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf1, 0x17, 0xff, 0x3b,
-	0xb4, 0x3e, 0x6c, 0xbb, 0xd8, 0x37, 0x46, 0x86, 0x60, 0x4b, 0x09, 0x61, 0xea, 0x98, 0xdc, 0x79,
-	0xf0, 0xfe, 0xfb, 0x8a, 0x6c, 0x9d, 0xae, 0x08, 0xf8, 0xb5, 0x22, 0xe0, 0x43, 0x49, 0xc0, 0xa7,
-	0x92, 0x80, 0xe3, 0x92, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0x7e, 0x96, 0x64, 0xeb, 0xd4,
-	0xf6, 0x4f, 0x08, 0x38, 0x3e, 0x21, 0x00, 0x5e, 0x9b, 0x8a, 0xa2, 0x39, 0xc6, 0x60, 0xfb, 0x40,
-	0xb2, 0xa1, 0x2d, 0x86, 0xe0, 0x45, 0xdb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xe1, 0x70, 0xf0, 0x39,
-	0x20, 0x87, 0x15, 0x3a, 0xf4, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8,
-	0x8f, 0x22, 0xe7, 0x71, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x64, 0x40, 0x40, 0xa1,
-	0x03, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/types/duration.go b/vendor/github.com/gogo/protobuf/types/duration.go
index 475d61f..979b8e7 100644
--- a/vendor/github.com/gogo/protobuf/types/duration.go
+++ b/vendor/github.com/gogo/protobuf/types/duration.go
@@ -80,7 +80,7 @@
 		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
 	}
 	if p.Nanos != 0 {
-		d += time.Duration(p.Nanos)
+		d += time.Duration(p.Nanos) * time.Nanosecond
 		if (d < 0) != (p.Nanos < 0) {
 			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
 		}
diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go
index 545ef96..f328ee0 100644
--- a/vendor/github.com/gogo/protobuf/types/duration.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go
@@ -3,16 +3,15 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -105,7 +104,7 @@
 func (m *Duration) Reset()      { *m = Duration{} }
 func (*Duration) ProtoMessage() {}
 func (*Duration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_duration_187e4d5f80a83848, []int{0}
+	return fileDescriptor_23597b2ebd7ac6c5, []int{0}
 }
 func (*Duration) XXX_WellKnownType() string { return "Duration" }
 func (m *Duration) XXX_Unmarshal(b []byte) error {
@@ -123,8 +122,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Duration) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Duration.Merge(dst, src)
+func (m *Duration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Duration.Merge(m, src)
 }
 func (m *Duration) XXX_Size() int {
 	return m.Size()
@@ -155,6 +154,27 @@
 func init() {
 	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
 }
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
+
+var fileDescriptor_23597b2ebd7ac6c5 = []byte{
+	// 209 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c,
+	0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91,
+	0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2,
+	0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b,
+	0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff,
+	0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00,
+	0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89,
+	0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00,
+	0x00,
+}
+
 func (this *Duration) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -335,7 +355,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -363,7 +383,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Seconds |= (int64(b) & 0x7F) << shift
+				m.Seconds |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -382,7 +402,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Nanos |= (int32(b) & 0x7F) << shift
+				m.Nanos |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -396,6 +416,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthDuration
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthDuration
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -463,10 +486,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthDuration
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthDuration
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -495,6 +521,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthDuration
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -513,25 +542,3 @@
 	ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowDuration   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_187e4d5f80a83848)
-}
-
-var fileDescriptor_duration_187e4d5f80a83848 = []byte{
-	// 209 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
-	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
-	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
-	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
-	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c,
-	0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91,
-	0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2,
-	0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b,
-	0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff,
-	0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00,
-	0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89,
-	0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00,
-	0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go
index 323b246..8588187 100644
--- a/vendor/github.com/gogo/protobuf/types/empty.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go
@@ -3,16 +3,15 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -43,7 +42,7 @@
 func (m *Empty) Reset()      { *m = Empty{} }
 func (*Empty) ProtoMessage() {}
 func (*Empty) Descriptor() ([]byte, []int) {
-	return fileDescriptor_empty_b366a5cbb7c614df, []int{0}
+	return fileDescriptor_900544acb223d5b8, []int{0}
 }
 func (*Empty) XXX_WellKnownType() string { return "Empty" }
 func (m *Empty) XXX_Unmarshal(b []byte) error {
@@ -61,8 +60,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Empty) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Empty.Merge(dst, src)
+func (m *Empty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Empty.Merge(m, src)
 }
 func (m *Empty) XXX_Size() int {
 	return m.Size()
@@ -79,6 +78,24 @@
 func init() {
 	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
 }
+
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
+
+var fileDescriptor_900544acb223d5b8 = []byte{
+	// 176 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
+	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
+	0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28,
+	0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c,
+	0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72,
+	0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05,
+	0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8,
+	0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd,
+	0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8,
+	0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00,
+}
+
 func (this *Empty) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -321,7 +338,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -344,6 +361,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthEmpty
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEmpty
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -411,10 +431,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthEmpty
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthEmpty
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -443,6 +466,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthEmpty
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -461,20 +487,3 @@
 	ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowEmpty   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_b366a5cbb7c614df) }
-
-var fileDescriptor_empty_b366a5cbb7c614df = []byte{
-	// 176 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
-	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
-	0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28,
-	0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c,
-	0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72,
-	0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05,
-	0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8,
-	0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd,
-	0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8,
-	0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go
index 3e60fcf..b401a2b 100644
--- a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go
@@ -3,16 +3,15 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -93,57 +92,49 @@
 // describe the updated values, the API ignores the values of all
 // fields not covered by the mask.
 //
-// If a repeated field is specified for an update operation, the existing
-// repeated values in the target resource will be overwritten by the new values.
-// Note that a repeated field is only allowed in the last position of a `paths`
-// string.
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
 //
 // If a sub-message is specified in the last position of the field mask for an
-// update operation, then the existing sub-message in the target resource is
-// overwritten. Given the target message:
+// update operation, then new value will be merged into the existing sub-message
+// in the target resource.
+//
+// For example, given the target message:
 //
 //     f {
 //       b {
-//         d : 1
-//         x : 2
+//         d: 1
+//         x: 2
 //       }
-//       c : 1
+//       c: [1]
 //     }
 //
 // And an update message:
 //
 //     f {
 //       b {
-//         d : 10
+//         d: 10
 //       }
+//       c: [2]
 //     }
 //
 // then if the field mask is:
 //
-//  paths: "f.b"
+//  paths: ["f.b", "f.c"]
 //
 // then the result will be:
 //
 //     f {
 //       b {
-//         d : 10
+//         d: 10
+//         x: 2
 //       }
-//       c : 1
+//       c: [1, 2]
 //     }
 //
-// However, if the update mask was:
-//
-//  paths: "f.b.d"
-//
-// then the result would be:
-//
-//     f {
-//       b {
-//         d : 10
-//         x : 2
-//       }
-//       c : 1
-//     }
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
 //
 // In order to reset a field's value to the default, the field must
 // be in the mask and set to the default value in the provided resource.
@@ -229,8 +220,8 @@
 //
 // ## Field Mask Verification
 //
-// The implementation of the all the API methods, which have any FieldMask type
-// field in the request, should verify the included field paths, and return
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
 // `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
 type FieldMask struct {
 	// The set of field mask paths.
@@ -243,7 +234,7 @@
 func (m *FieldMask) Reset()      { *m = FieldMask{} }
 func (*FieldMask) ProtoMessage() {}
 func (*FieldMask) Descriptor() ([]byte, []int) {
-	return fileDescriptor_field_mask_f1676d06eb3d88ba, []int{0}
+	return fileDescriptor_5158202634f0da48, []int{0}
 }
 func (m *FieldMask) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -260,8 +251,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *FieldMask) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FieldMask.Merge(dst, src)
+func (m *FieldMask) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldMask.Merge(m, src)
 }
 func (m *FieldMask) XXX_Size() int {
 	return m.Size()
@@ -285,6 +276,26 @@
 func init() {
 	proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
 }
+
+func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) }
+
+var fileDescriptor_5158202634f0da48 = []byte{
+	// 203 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
+	0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
+	0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
+	0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c,
+	0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39,
+	0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23,
+	0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7,
+	0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50,
+	0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee,
+	0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72,
+	0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00,
+	0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00,
+}
+
 func (this *FieldMask) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -577,7 +588,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -605,7 +616,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -615,6 +626,9 @@
 				return ErrInvalidLengthFieldMask
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthFieldMask
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -629,6 +643,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthFieldMask
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthFieldMask
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -696,10 +713,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthFieldMask
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthFieldMask
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -728,6 +748,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthFieldMask
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -746,24 +769,3 @@
 	ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowFieldMask   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_field_mask_f1676d06eb3d88ba)
-}
-
-var fileDescriptor_field_mask_f1676d06eb3d88ba = []byte{
-	// 200 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
-	0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
-	0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
-	0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0xad, 0x8c,
-	0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39,
-	0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23,
-	0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7,
-	0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50,
-	0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55,
-	0x4c, 0x72, 0xee, 0x10, 0x0d, 0x01, 0x50, 0x0d, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9,
-	0xe5, 0x79, 0x21, 0x20, 0x65, 0x49, 0x6c, 0x60, 0x93, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
-	0xcf, 0xae, 0x5b, 0xec, 0xe6, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/vendor/github.com/gogo/protobuf/types/source_context.pb.go
index 97b6d62..3688840 100644
--- a/vendor/github.com/gogo/protobuf/types/source_context.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/source_context.pb.go
@@ -3,16 +3,15 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -39,7 +38,7 @@
 func (m *SourceContext) Reset()      { *m = SourceContext{} }
 func (*SourceContext) ProtoMessage() {}
 func (*SourceContext) Descriptor() ([]byte, []int) {
-	return fileDescriptor_source_context_b387e69fb08d10e5, []int{0}
+	return fileDescriptor_b686cdb126d509db, []int{0}
 }
 func (m *SourceContext) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -56,8 +55,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *SourceContext) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SourceContext.Merge(dst, src)
+func (m *SourceContext) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceContext.Merge(m, src)
 }
 func (m *SourceContext) XXX_Size() int {
 	return m.Size()
@@ -81,6 +80,29 @@
 func init() {
 	proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext")
 }
+
+func init() {
+	proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db)
+}
+
+var fileDescriptor_b686cdb126d509db = []byte{
+	// 212 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d,
+	0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43,
+	0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49,
+	0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
+	0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63,
+	0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9,
+	0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e,
+	0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39,
+	0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac,
+	0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43,
+	0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80,
+	0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1,
+	0xf9, 0x00, 0x00, 0x00,
+}
+
 func (this *SourceContext) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -345,7 +367,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -373,7 +395,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -383,6 +405,9 @@
 				return ErrInvalidLengthSourceContext
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSourceContext
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -397,6 +422,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthSourceContext
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSourceContext
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -464,10 +492,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthSourceContext
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthSourceContext
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -496,6 +527,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthSourceContext
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -514,25 +548,3 @@
 	ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowSourceContext   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_source_context_b387e69fb08d10e5)
-}
-
-var fileDescriptor_source_context_b387e69fb08d10e5 = []byte{
-	// 212 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d,
-	0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43,
-	0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49,
-	0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
-	0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63,
-	0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9,
-	0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e,
-	0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39,
-	0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac,
-	0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43,
-	0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80,
-	0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1,
-	0xf9, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go
index 2b6575f..63fd17b 100644
--- a/vendor/github.com/gogo/protobuf/types/struct.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go
@@ -3,21 +3,18 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import strconv "strconv"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import encoding_binary "encoding/binary"
-
-import io "io"
+import (
+	bytes "bytes"
+	encoding_binary "encoding/binary"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strconv "strconv"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -44,13 +41,15 @@
 var NullValue_name = map[int32]string{
 	0: "NULL_VALUE",
 }
+
 var NullValue_value = map[string]int32{
 	"NULL_VALUE": 0,
 }
 
 func (NullValue) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_struct_7f9d36853146434f, []int{0}
+	return fileDescriptor_df322afd6c9fb402, []int{0}
 }
+
 func (NullValue) XXX_WellKnownType() string { return "NullValue" }
 
 // `Struct` represents a structured data value, consisting of fields
@@ -72,7 +71,7 @@
 func (m *Struct) Reset()      { *m = Struct{} }
 func (*Struct) ProtoMessage() {}
 func (*Struct) Descriptor() ([]byte, []int) {
-	return fileDescriptor_struct_7f9d36853146434f, []int{0}
+	return fileDescriptor_df322afd6c9fb402, []int{0}
 }
 func (*Struct) XXX_WellKnownType() string { return "Struct" }
 func (m *Struct) XXX_Unmarshal(b []byte) error {
@@ -90,8 +89,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Struct) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Struct.Merge(dst, src)
+func (m *Struct) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Struct.Merge(m, src)
 }
 func (m *Struct) XXX_Size() int {
 	return m.Size()
@@ -138,7 +137,7 @@
 func (m *Value) Reset()      { *m = Value{} }
 func (*Value) ProtoMessage() {}
 func (*Value) Descriptor() ([]byte, []int) {
-	return fileDescriptor_struct_7f9d36853146434f, []int{1}
+	return fileDescriptor_df322afd6c9fb402, []int{1}
 }
 func (*Value) XXX_WellKnownType() string { return "Value" }
 func (m *Value) XXX_Unmarshal(b []byte) error {
@@ -156,8 +155,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Value) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Value.Merge(dst, src)
+func (m *Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Value.Merge(m, src)
 }
 func (m *Value) XXX_Size() int {
 	return m.Size()
@@ -403,7 +402,7 @@
 func (m *ListValue) Reset()      { *m = ListValue{} }
 func (*ListValue) ProtoMessage() {}
 func (*ListValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_struct_7f9d36853146434f, []int{2}
+	return fileDescriptor_df322afd6c9fb402, []int{2}
 }
 func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
 func (m *ListValue) XXX_Unmarshal(b []byte) error {
@@ -421,8 +420,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *ListValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ListValue.Merge(dst, src)
+func (m *ListValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListValue.Merge(m, src)
 }
 func (m *ListValue) XXX_Size() int {
 	return m.Size()
@@ -444,12 +443,47 @@
 	return "google.protobuf.ListValue"
 }
 func init() {
+	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
 	proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
 	proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
 	proto.RegisterType((*Value)(nil), "google.protobuf.Value")
 	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
-	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
 }
+
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
+
+var fileDescriptor_df322afd6c9fb402 = []byte{
+	// 439 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x6b, 0xd4, 0x40,
+	0x14, 0xc6, 0xf3, 0xb2, 0xdd, 0xe0, 0xbe, 0x48, 0x2d, 0x23, 0xe8, 0x52, 0x65, 0x5c, 0xb6, 0x97,
+	0x45, 0x24, 0x85, 0xf5, 0x22, 0xae, 0x17, 0x17, 0x6a, 0x0b, 0x86, 0x12, 0xa3, 0xad, 0xe0, 0x65,
+	0x31, 0x69, 0xba, 0x84, 0x4e, 0x67, 0x4a, 0x32, 0xa3, 0xec, 0x4d, 0xff, 0x0b, 0xcf, 0x9e, 0xc4,
+	0xa3, 0x7f, 0x85, 0x47, 0x8f, 0x1e, 0xdd, 0x78, 0xf1, 0xd8, 0x63, 0x8f, 0x32, 0x33, 0x49, 0x94,
+	0x2e, 0xbd, 0xe5, 0x7d, 0xf3, 0x7b, 0xdf, 0x7b, 0xdf, 0x0b, 0xde, 0x9d, 0x0b, 0x31, 0x67, 0xd9,
+	0xf6, 0x59, 0x21, 0xa4, 0x48, 0xd4, 0xf1, 0x76, 0x29, 0x0b, 0x95, 0xca, 0xc0, 0xd4, 0xe4, 0x86,
+	0x7d, 0x0d, 0x9a, 0xd7, 0xe1, 0x27, 0x40, 0xef, 0xa5, 0x21, 0xc8, 0x04, 0xbd, 0xe3, 0x3c, 0x63,
+	0x47, 0x65, 0x1f, 0x06, 0x9d, 0x91, 0x3f, 0xde, 0x0a, 0x2e, 0xc1, 0x81, 0x05, 0x83, 0x67, 0x86,
+	0xda, 0xe1, 0xb2, 0x58, 0xc4, 0x75, 0xcb, 0xe6, 0x0b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0x76, 0x4e,
+	0xb2, 0x45, 0x1f, 0x06, 0x30, 0xea, 0xc5, 0xfa, 0x93, 0x3c, 0xc0, 0xee, 0xbb, 0xb7, 0x4c, 0x65,
+	0x7d, 0x77, 0x00, 0x23, 0x7f, 0x7c, 0x6b, 0xc5, 0xfc, 0x50, 0xbf, 0xc6, 0x16, 0x7a, 0xec, 0x3e,
+	0x82, 0xe1, 0x37, 0x17, 0xbb, 0x46, 0x24, 0x13, 0x44, 0xae, 0x18, 0x9b, 0x59, 0x03, 0x6d, 0xba,
+	0x3e, 0xde, 0x5c, 0x31, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x7b, 0xbc, 0x29, 0xc8, 0x16,
+	0x5e, 0xe7, 0xea, 0x34, 0xc9, 0x8a, 0xd9, 0xbf, 0xf9, 0xb0, 0xe7, 0xc4, 0xbe, 0x55, 0x5b, 0xa8,
+	0x94, 0x45, 0xce, 0xe7, 0x35, 0xd4, 0xd1, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0xba, 0x87, 0x98, 0x08,
+	0xd1, 0xac, 0xb1, 0x36, 0x80, 0xd1, 0x35, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x62, 0x5c, 0x54, 0x2a,
+	0x6b, 0xa4, 0x6b, 0xa2, 0xde, 0xbe, 0xe2, 0x8e, 0xb5, 0xbd, 0x4a, 0x65, 0x9b, 0x92, 0xe5, 0x65,
+	0xd3, 0xeb, 0x99, 0xde, 0xd5, 0x94, 0x61, 0x5e, 0xca, 0x36, 0x25, 0x6b, 0x8a, 0xa9, 0x87, 0x6b,
+	0x27, 0x39, 0x3f, 0x1a, 0x4e, 0xb0, 0xd7, 0x12, 0x24, 0x40, 0xcf, 0x98, 0x35, 0x7f, 0xf4, 0xaa,
+	0xa3, 0xd7, 0xd4, 0xfd, 0x3b, 0xd8, 0x6b, 0x8f, 0x48, 0xd6, 0x11, 0xf7, 0x0f, 0xc2, 0x70, 0x76,
+	0xf8, 0x34, 0x3c, 0xd8, 0xd9, 0x70, 0xa6, 0x1f, 0xe1, 0xe7, 0x92, 0x3a, 0xe7, 0x4b, 0x0a, 0x17,
+	0x4b, 0x0a, 0x1f, 0x2a, 0x0a, 0x5f, 0x2a, 0x0a, 0xdf, 0x2b, 0x0a, 0x3f, 0x2a, 0x0a, 0xbf, 0x2a,
+	0x0a, 0x7f, 0x2a, 0xea, 0x9c, 0x6b, 0xed, 0x37, 0x05, 0xbc, 0x99, 0x8a, 0xd3, 0xcb, 0xe3, 0xa6,
+	0xbe, 0x4d, 0x1e, 0xe9, 0x3a, 0x82, 0x37, 0x5d, 0xb9, 0x38, 0xcb, 0xca, 0x0b, 0x80, 0xcf, 0x6e,
+	0x67, 0x37, 0x9a, 0x7e, 0x75, 0xe9, 0xae, 0x6d, 0x88, 0x9a, 0xfd, 0x5e, 0x67, 0x8c, 0x3d, 0xe7,
+	0xe2, 0x3d, 0x7f, 0xa5, 0xc9, 0xc4, 0x33, 0x4e, 0x0f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xad,
+	0x84, 0x08, 0xae, 0xe5, 0x02, 0x00, 0x00,
+}
+
 func (x NullValue) String() string {
 	s, ok := NullValue_name[int32(x)]
 	if ok {
@@ -1407,7 +1441,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1435,7 +1469,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1444,6 +1478,9 @@
 				return ErrInvalidLengthStruct
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthStruct
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1464,7 +1501,7 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1481,7 +1518,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1491,6 +1528,9 @@
 						return ErrInvalidLengthStruct
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthStruct
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -1507,7 +1547,7 @@
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						mapmsglen |= (int(b) & 0x7F) << shift
+						mapmsglen |= int(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1516,7 +1556,7 @@
 						return ErrInvalidLengthStruct
 					}
 					postmsgIndex := iNdEx + mapmsglen
-					if mapmsglen < 0 {
+					if postmsgIndex < 0 {
 						return ErrInvalidLengthStruct
 					}
 					if postmsgIndex > l {
@@ -1553,6 +1593,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthStruct
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthStruct
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1581,7 +1624,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1609,7 +1652,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (NullValue(b) & 0x7F) << shift
+				v |= NullValue(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1640,7 +1683,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1650,6 +1693,9 @@
 				return ErrInvalidLengthStruct
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthStruct
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1669,7 +1715,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1690,7 +1736,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1699,6 +1745,9 @@
 				return ErrInvalidLengthStruct
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthStruct
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1722,7 +1771,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1731,6 +1780,9 @@
 				return ErrInvalidLengthStruct
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthStruct
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1749,6 +1801,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthStruct
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthStruct
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1777,7 +1832,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1805,7 +1860,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1814,6 +1869,9 @@
 				return ErrInvalidLengthStruct
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthStruct
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1831,6 +1889,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthStruct
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthStruct
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1898,10 +1959,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthStruct
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthStruct
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1930,6 +1994,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthStruct
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1948,39 +2015,3 @@
 	ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowStruct   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_7f9d36853146434f)
-}
-
-var fileDescriptor_struct_7f9d36853146434f = []byte{
-	// 439 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x6b, 0xd4, 0x40,
-	0x14, 0xc6, 0xf3, 0xb2, 0xdd, 0xe0, 0xbe, 0x48, 0x2d, 0x23, 0xe8, 0x52, 0x65, 0x5c, 0xb6, 0x97,
-	0x45, 0x24, 0x85, 0xf5, 0x22, 0xae, 0x17, 0x17, 0x6a, 0x0b, 0x86, 0x12, 0xa3, 0xad, 0xe0, 0x65,
-	0x31, 0x69, 0xba, 0x84, 0x4e, 0x67, 0x4a, 0x32, 0xa3, 0xec, 0x4d, 0xff, 0x0b, 0xcf, 0x9e, 0xc4,
-	0xa3, 0x7f, 0x85, 0x47, 0x8f, 0x1e, 0xdd, 0x78, 0xf1, 0xd8, 0x63, 0x8f, 0x32, 0x33, 0x49, 0x94,
-	0x2e, 0xbd, 0xe5, 0x7d, 0xf3, 0x7b, 0xdf, 0x7b, 0xdf, 0x0b, 0xde, 0x9d, 0x0b, 0x31, 0x67, 0xd9,
-	0xf6, 0x59, 0x21, 0xa4, 0x48, 0xd4, 0xf1, 0x76, 0x29, 0x0b, 0x95, 0xca, 0xc0, 0xd4, 0xe4, 0x86,
-	0x7d, 0x0d, 0x9a, 0xd7, 0xe1, 0x27, 0x40, 0xef, 0xa5, 0x21, 0xc8, 0x04, 0xbd, 0xe3, 0x3c, 0x63,
-	0x47, 0x65, 0x1f, 0x06, 0x9d, 0x91, 0x3f, 0xde, 0x0a, 0x2e, 0xc1, 0x81, 0x05, 0x83, 0x67, 0x86,
-	0xda, 0xe1, 0xb2, 0x58, 0xc4, 0x75, 0xcb, 0xe6, 0x0b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0x76, 0x4e,
-	0xb2, 0x45, 0x1f, 0x06, 0x30, 0xea, 0xc5, 0xfa, 0x93, 0x3c, 0xc0, 0xee, 0xbb, 0xb7, 0x4c, 0x65,
-	0x7d, 0x77, 0x00, 0x23, 0x7f, 0x7c, 0x6b, 0xc5, 0xfc, 0x50, 0xbf, 0xc6, 0x16, 0x7a, 0xec, 0x3e,
-	0x82, 0xe1, 0x37, 0x17, 0xbb, 0x46, 0x24, 0x13, 0x44, 0xae, 0x18, 0x9b, 0x59, 0x03, 0x6d, 0xba,
-	0x3e, 0xde, 0x5c, 0x31, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x7b, 0xbc, 0x29, 0xc8, 0x16,
-	0x5e, 0xe7, 0xea, 0x34, 0xc9, 0x8a, 0xd9, 0xbf, 0xf9, 0xb0, 0xe7, 0xc4, 0xbe, 0x55, 0x5b, 0xa8,
-	0x94, 0x45, 0xce, 0xe7, 0x35, 0xd4, 0xd1, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0xba, 0x87, 0x98, 0x08,
-	0xd1, 0xac, 0xb1, 0x36, 0x80, 0xd1, 0x35, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x62, 0x5c, 0x54, 0x2a,
-	0x6b, 0xa4, 0x6b, 0xa2, 0xde, 0xbe, 0xe2, 0x8e, 0xb5, 0xbd, 0x4a, 0x65, 0x9b, 0x92, 0xe5, 0x65,
-	0xd3, 0xeb, 0x99, 0xde, 0xd5, 0x94, 0x61, 0x5e, 0xca, 0x36, 0x25, 0x6b, 0x8a, 0xa9, 0x87, 0x6b,
-	0x27, 0x39, 0x3f, 0x1a, 0x4e, 0xb0, 0xd7, 0x12, 0x24, 0x40, 0xcf, 0x98, 0x35, 0x7f, 0xf4, 0xaa,
-	0xa3, 0xd7, 0xd4, 0xfd, 0x3b, 0xd8, 0x6b, 0x8f, 0x48, 0xd6, 0x11, 0xf7, 0x0f, 0xc2, 0x70, 0x76,
-	0xf8, 0x34, 0x3c, 0xd8, 0xd9, 0x70, 0xa6, 0x1f, 0xe1, 0xe7, 0x92, 0x3a, 0xe7, 0x4b, 0x0a, 0x17,
-	0x4b, 0x0a, 0x1f, 0x2a, 0x0a, 0x5f, 0x2a, 0x0a, 0xdf, 0x2b, 0x0a, 0x3f, 0x2a, 0x0a, 0xbf, 0x2a,
-	0x0a, 0x7f, 0x2a, 0xea, 0x9c, 0x6b, 0xed, 0x37, 0x05, 0xbc, 0x99, 0x8a, 0xd3, 0xcb, 0xe3, 0xa6,
-	0xbe, 0x4d, 0x1e, 0xe9, 0x3a, 0x82, 0x37, 0x5d, 0xb9, 0x38, 0xcb, 0xca, 0x0b, 0x80, 0xcf, 0x6e,
-	0x67, 0x37, 0x9a, 0x7e, 0x75, 0xe9, 0xae, 0x6d, 0x88, 0x9a, 0xfd, 0x5e, 0x67, 0x8c, 0x3d, 0xe7,
-	0xe2, 0x3d, 0x7f, 0xa5, 0xc9, 0xc4, 0x33, 0x4e, 0x0f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xad,
-	0x84, 0x08, 0xae, 0xe5, 0x02, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.go b/vendor/github.com/gogo/protobuf/types/timestamp.go
index 7ae54d8..232ada5 100644
--- a/vendor/github.com/gogo/protobuf/types/timestamp.go
+++ b/vendor/github.com/gogo/protobuf/types/timestamp.go
@@ -109,11 +109,9 @@
 // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
 // It returns an error if the resulting Timestamp is invalid.
 func TimestampProto(t time.Time) (*Timestamp, error) {
-	seconds := t.Unix()
-	nanos := int32(t.Sub(time.Unix(seconds, 0)))
 	ts := &Timestamp{
-		Seconds: seconds,
-		Nanos:   nanos,
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
 	}
 	if err := validateTimestamp(ts); err != nil {
 		return nil, err
diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go
index b6c9100..3ee6cb0 100644
--- a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go
@@ -3,16 +3,15 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -25,17 +24,19 @@
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from  RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+// A Timestamp represents a point in time independent of any time zone or local
+// calendar, encoded as a count of seconds and fractions of seconds at
+// nanosecond resolution. The count is relative to an epoch at UTC midnight on
+// January 1, 1970, in the proleptic Gregorian calendar which extends the
+// Gregorian calendar backwards to year one.
+//
+// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+// second table is needed for interpretation, using a [24-hour linear
+// smear](https://developers.google.com/time/smear).
+//
+// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+// restricting to that range, we ensure that we can convert to and from [RFC
+// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
 //
 // # Examples
 //
@@ -96,12 +97,12 @@
 // 01:30 UTC on January 15, 2017.
 //
 // In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
 // method. In Python, a standard `datetime.datetime` object can be converted
 // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
 // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
 // can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
 // ) to obtain a formatter capable of generating timestamps in this format.
 //
 //
@@ -123,7 +124,7 @@
 func (m *Timestamp) Reset()      { *m = Timestamp{} }
 func (*Timestamp) ProtoMessage() {}
 func (*Timestamp) Descriptor() ([]byte, []int) {
-	return fileDescriptor_timestamp_820f61227bd8f1e8, []int{0}
+	return fileDescriptor_292007bbfe81227e, []int{0}
 }
 func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
 func (m *Timestamp) XXX_Unmarshal(b []byte) error {
@@ -141,8 +142,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Timestamp) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Timestamp.Merge(dst, src)
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Timestamp.Merge(m, src)
 }
 func (m *Timestamp) XXX_Size() int {
 	return m.Size()
@@ -173,6 +174,27 @@
 func init() {
 	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
 }
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
+
+var fileDescriptor_292007bbfe81227e = []byte{
+	// 212 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d,
+	0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3,
+	0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c,
+	0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1,
+	0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90,
+	0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88,
+	0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90,
+	0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd,
+	0xfa, 0x00, 0x00, 0x00,
+}
+
 func (this *Timestamp) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -353,7 +375,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -381,7 +403,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Seconds |= (int64(b) & 0x7F) << shift
+				m.Seconds |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -400,7 +422,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Nanos |= (int32(b) & 0x7F) << shift
+				m.Nanos |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -414,6 +436,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthTimestamp
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTimestamp
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -481,10 +506,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthTimestamp
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthTimestamp
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -513,6 +541,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthTimestamp
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -531,25 +562,3 @@
 	ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowTimestamp   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_820f61227bd8f1e8)
-}
-
-var fileDescriptor_timestamp_820f61227bd8f1e8 = []byte{
-	// 212 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
-	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
-	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
-	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
-	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d,
-	0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3,
-	0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c,
-	0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1,
-	0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90,
-	0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88,
-	0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90,
-	0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd,
-	0xfa, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/github.com/gogo/protobuf/types/type.pb.go b/vendor/github.com/gogo/protobuf/types/type.pb.go
index b422be3..366f493 100644
--- a/vendor/github.com/gogo/protobuf/types/type.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/type.pb.go
@@ -3,18 +3,16 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strconv "strconv"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strconv "strconv"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -41,13 +39,14 @@
 	0: "SYNTAX_PROTO2",
 	1: "SYNTAX_PROTO3",
 }
+
 var Syntax_value = map[string]int32{
 	"SYNTAX_PROTO2": 0,
 	"SYNTAX_PROTO3": 1,
 }
 
 func (Syntax) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_type_0082d870c49329d7, []int{0}
+	return fileDescriptor_dd271cc1e348c538, []int{0}
 }
 
 // Basic field types.
@@ -115,6 +114,7 @@
 	17: "TYPE_SINT32",
 	18: "TYPE_SINT64",
 }
+
 var Field_Kind_value = map[string]int32{
 	"TYPE_UNKNOWN":  0,
 	"TYPE_DOUBLE":   1,
@@ -138,7 +138,7 @@
 }
 
 func (Field_Kind) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_type_0082d870c49329d7, []int{1, 0}
+	return fileDescriptor_dd271cc1e348c538, []int{1, 0}
 }
 
 // Whether a field is optional, required, or repeated.
@@ -161,6 +161,7 @@
 	2: "CARDINALITY_REQUIRED",
 	3: "CARDINALITY_REPEATED",
 }
+
 var Field_Cardinality_value = map[string]int32{
 	"CARDINALITY_UNKNOWN":  0,
 	"CARDINALITY_OPTIONAL": 1,
@@ -169,7 +170,7 @@
 }
 
 func (Field_Cardinality) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_type_0082d870c49329d7, []int{1, 1}
+	return fileDescriptor_dd271cc1e348c538, []int{1, 1}
 }
 
 // A protocol buffer message type.
@@ -194,7 +195,7 @@
 func (m *Type) Reset()      { *m = Type{} }
 func (*Type) ProtoMessage() {}
 func (*Type) Descriptor() ([]byte, []int) {
-	return fileDescriptor_type_0082d870c49329d7, []int{0}
+	return fileDescriptor_dd271cc1e348c538, []int{0}
 }
 func (m *Type) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -211,8 +212,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Type) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Type.Merge(dst, src)
+func (m *Type) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Type.Merge(m, src)
 }
 func (m *Type) XXX_Size() int {
 	return m.Size()
@@ -301,7 +302,7 @@
 func (m *Field) Reset()      { *m = Field{} }
 func (*Field) ProtoMessage() {}
 func (*Field) Descriptor() ([]byte, []int) {
-	return fileDescriptor_type_0082d870c49329d7, []int{1}
+	return fileDescriptor_dd271cc1e348c538, []int{1}
 }
 func (m *Field) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -318,8 +319,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Field) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Field.Merge(dst, src)
+func (m *Field) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Field.Merge(m, src)
 }
 func (m *Field) XXX_Size() int {
 	return m.Size()
@@ -424,7 +425,7 @@
 func (m *Enum) Reset()      { *m = Enum{} }
 func (*Enum) ProtoMessage() {}
 func (*Enum) Descriptor() ([]byte, []int) {
-	return fileDescriptor_type_0082d870c49329d7, []int{2}
+	return fileDescriptor_dd271cc1e348c538, []int{2}
 }
 func (m *Enum) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -441,8 +442,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Enum) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Enum.Merge(dst, src)
+func (m *Enum) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Enum.Merge(m, src)
 }
 func (m *Enum) XXX_Size() int {
 	return m.Size()
@@ -508,7 +509,7 @@
 func (m *EnumValue) Reset()      { *m = EnumValue{} }
 func (*EnumValue) ProtoMessage() {}
 func (*EnumValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_type_0082d870c49329d7, []int{3}
+	return fileDescriptor_dd271cc1e348c538, []int{3}
 }
 func (m *EnumValue) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -525,8 +526,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *EnumValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumValue.Merge(dst, src)
+func (m *EnumValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValue.Merge(m, src)
 }
 func (m *EnumValue) XXX_Size() int {
 	return m.Size()
@@ -583,7 +584,7 @@
 func (m *Option) Reset()      { *m = Option{} }
 func (*Option) ProtoMessage() {}
 func (*Option) Descriptor() ([]byte, []int) {
-	return fileDescriptor_type_0082d870c49329d7, []int{4}
+	return fileDescriptor_dd271cc1e348c538, []int{4}
 }
 func (m *Option) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -600,8 +601,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Option) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Option.Merge(dst, src)
+func (m *Option) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Option.Merge(m, src)
 }
 func (m *Option) XXX_Size() int {
 	return m.Size()
@@ -630,15 +631,75 @@
 	return "google.protobuf.Option"
 }
 func init() {
+	proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value)
+	proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value)
+	proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value)
 	proto.RegisterType((*Type)(nil), "google.protobuf.Type")
 	proto.RegisterType((*Field)(nil), "google.protobuf.Field")
 	proto.RegisterType((*Enum)(nil), "google.protobuf.Enum")
 	proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue")
 	proto.RegisterType((*Option)(nil), "google.protobuf.Option")
-	proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value)
-	proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value)
-	proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value)
 }
+
+func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_dd271cc1e348c538) }
+
+var fileDescriptor_dd271cc1e348c538 = []byte{
+	// 840 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46,
+	0x14, 0xf6, 0x0a, 0x21, 0xa3, 0x87, 0xc1, 0x9b, 0x4d, 0x26, 0x51, 0x9c, 0x19, 0x95, 0xa1, 0x3d,
+	0x30, 0x39, 0xe0, 0x29, 0x78, 0x3c, 0xbd, 0x82, 0x91, 0x29, 0x63, 0x22, 0xa9, 0x8b, 0x68, 0xe2,
+	0x5e, 0x18, 0x0c, 0x72, 0x86, 0x44, 0xac, 0x18, 0x24, 0x5a, 0x73, 0xeb, 0x4c, 0xcf, 0xfd, 0x27,
+	0x7a, 0xea, 0xf4, 0xdc, 0x3f, 0xc2, 0xc7, 0x1e, 0x7b, 0xac, 0xc9, 0xa5, 0xc7, 0x1c, 0x73, 0x6b,
+	0x67, 0x57, 0x20, 0x8b, 0x1f, 0x9d, 0x49, 0xdb, 0x1b, 0xef, 0xfb, 0xbe, 0xf7, 0x73, 0x9f, 0x1e,
+	0x70, 0xf4, 0xda, 0xf7, 0x5f, 0x7b, 0xee, 0xf1, 0x64, 0xea, 0x87, 0xfe, 0xd5, 0xec, 0xfa, 0x38,
+	0x9c, 0x4f, 0xdc, 0xb2, 0xb0, 0xc8, 0x61, 0xc4, 0x95, 0x57, 0xdc, 0xd1, 0xd3, 0x4d, 0x71, 0x9f,
+	0xcd, 0x23, 0xf6, 0xe8, 0xb3, 0x4d, 0x2a, 0xf0, 0x67, 0xd3, 0x81, 0xdb, 0x1b, 0xf8, 0x2c, 0x74,
+	0x6f, 0xc2, 0x48, 0x55, 0xfc, 0x51, 0x02, 0xd9, 0x99, 0x4f, 0x5c, 0x42, 0x40, 0x66, 0xfd, 0xb1,
+	0xab, 0xa1, 0x02, 0x2a, 0xa9, 0x54, 0xfc, 0x26, 0x65, 0x50, 0xae, 0x47, 0xae, 0x37, 0x0c, 0x34,
+	0xa9, 0x90, 0x2a, 0x65, 0x2b, 0x8f, 0xcb, 0x1b, 0xf9, 0xcb, 0xe7, 0x9c, 0xa6, 0x4b, 0x15, 0x79,
+	0x0c, 0x8a, 0xcf, 0x5c, 0xff, 0x3a, 0xd0, 0x52, 0x85, 0x54, 0x49, 0xa5, 0x4b, 0x8b, 0x7c, 0x0e,
+	0xfb, 0xfe, 0x24, 0x1c, 0xf9, 0x2c, 0xd0, 0x64, 0x11, 0xe8, 0xc9, 0x56, 0x20, 0x4b, 0xf0, 0x74,
+	0xa5, 0x23, 0x06, 0xe4, 0xd7, 0xeb, 0xd5, 0xd2, 0x05, 0x54, 0xca, 0x56, 0xf4, 0x2d, 0xcf, 0x8e,
+	0x90, 0x9d, 0x45, 0x2a, 0x9a, 0x0b, 0x92, 0x26, 0x39, 0x06, 0x25, 0x98, 0xb3, 0xb0, 0x7f, 0xa3,
+	0x29, 0x05, 0x54, 0xca, 0xef, 0x48, 0xdc, 0x11, 0x34, 0x5d, 0xca, 0x8a, 0xbf, 0x2a, 0x90, 0x16,
+	0x4d, 0x91, 0x63, 0x90, 0xdf, 0x8e, 0xd8, 0x50, 0x0c, 0x24, 0x5f, 0x79, 0xb6, 0xbb, 0xf5, 0xf2,
+	0xc5, 0x88, 0x0d, 0xa9, 0x10, 0x92, 0x06, 0x64, 0x07, 0xfd, 0xe9, 0x70, 0xc4, 0xfa, 0xde, 0x28,
+	0x9c, 0x6b, 0x92, 0xf0, 0x2b, 0xfe, 0x83, 0xdf, 0xd9, 0xbd, 0x92, 0x26, 0xdd, 0xf8, 0x0c, 0xd9,
+	0x6c, 0x7c, 0xe5, 0x4e, 0xb5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0xc5, 0xef, 0x23, 0x27, 0xde,
+	0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x4f, 0xf4, 0xa7, 0xd2, 0x7d, 0x6e, 0x77, 0xa7,
+	0x1e, 0xf9, 0x04, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0xba, 0x37, 0xda, 0xbe, 0x88, 0x05, 0x02,
+	0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0xdd, 0xa1, 0x96, 0x29, 0xa0, 0x52, 0x86, 0x2e,
+	0xad, 0xe4, 0x5b, 0xa9, 0x1f, 0xf9, 0x56, 0xcf, 0x40, 0x7d, 0x13, 0xf8, 0xac, 0x27, 0xea, 0x03,
+	0x51, 0x47, 0x86, 0x03, 0x26, 0xaf, 0xf1, 0x53, 0xc8, 0x0d, 0xdd, 0xeb, 0xfe, 0xcc, 0x0b, 0x7b,
+	0xdf, 0xf6, 0xbd, 0x99, 0xab, 0x65, 0x85, 0xe0, 0x60, 0x09, 0x7e, 0xcd, 0xb1, 0xe2, 0xad, 0x04,
+	0x32, 0x9f, 0x24, 0xc1, 0x70, 0xe0, 0x5c, 0xda, 0x46, 0xaf, 0x6b, 0x5e, 0x98, 0xd6, 0x4b, 0x13,
+	0xef, 0x91, 0x43, 0xc8, 0x0a, 0xa4, 0x61, 0x75, 0xeb, 0x6d, 0x03, 0x23, 0x92, 0x07, 0x10, 0xc0,
+	0x79, 0xdb, 0xaa, 0x39, 0x58, 0x8a, 0xed, 0x96, 0xe9, 0x9c, 0x9e, 0xe0, 0x54, 0xec, 0xd0, 0x8d,
+	0x00, 0x39, 0x29, 0xa8, 0x56, 0x70, 0x3a, 0xce, 0x71, 0xde, 0x7a, 0x65, 0x34, 0x4e, 0x4f, 0xb0,
+	0xb2, 0x8e, 0x54, 0x2b, 0x78, 0x9f, 0xe4, 0x40, 0x15, 0x48, 0xdd, 0xb2, 0xda, 0x38, 0x13, 0xc7,
+	0xec, 0x38, 0xb4, 0x65, 0x36, 0xb1, 0x1a, 0xc7, 0x6c, 0x52, 0xab, 0x6b, 0x63, 0x88, 0x23, 0xbc,
+	0x30, 0x3a, 0x9d, 0x5a, 0xd3, 0xc0, 0xd9, 0x58, 0x51, 0xbf, 0x74, 0x8c, 0x0e, 0x3e, 0x58, 0x2b,
+	0xab, 0x5a, 0xc1, 0xb9, 0x38, 0x85, 0x61, 0x76, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x28, 0xc5,
+	0xaa, 0x88, 0xc3, 0x0d, 0xe8, 0xf4, 0x04, 0xe3, 0xfb, 0x42, 0xa2, 0x28, 0x0f, 0xd6, 0x80, 0xd3,
+	0x13, 0x4c, 0x8a, 0x21, 0x64, 0x13, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xb3, 0x1a, 0x6d, 0xb4, 0xcc,
+	0x5a, 0xbb, 0xe5, 0x5c, 0x26, 0xe6, 0xaa, 0xc1, 0xa3, 0x24, 0x61, 0xd9, 0x4e, 0xcb, 0x32, 0x6b,
+	0x6d, 0x8c, 0x36, 0x19, 0x6a, 0x7c, 0xd5, 0x6d, 0x51, 0xa3, 0x81, 0xa5, 0x6d, 0xc6, 0x36, 0x6a,
+	0x8e, 0xd1, 0xc0, 0xa9, 0xe2, 0x5f, 0x08, 0x64, 0x83, 0xcd, 0xc6, 0x3b, 0xcf, 0xc8, 0x17, 0xa0,
+	0xba, 0x6c, 0x36, 0x8e, 0x9e, 0x3f, 0xba, 0x24, 0x47, 0x5b, 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0xd0,
+	0x7b, 0x71, 0x72, 0x19, 0x53, 0xff, 0xf9, 0x70, 0xc8, 0xff, 0xef, 0x70, 0xa4, 0x3f, 0xee, 0x70,
+	0xbc, 0x01, 0x35, 0x6e, 0x61, 0xe7, 0x14, 0xee, 0x3f, 0x6c, 0x69, 0xed, 0xc3, 0xfe, 0xf7, 0x3d,
+	0x16, 0xbf, 0x04, 0x25, 0x82, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6,
+	0xc2, 0xd5, 0xd8, 0x9c, 0x46, 0x92, 0xe7, 0x65, 0x50, 0xa2, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x4d,
+	0xa7, 0xf6, 0xaa, 0x67, 0x53, 0xcb, 0xb1, 0x2a, 0x78, 0x6f, 0x13, 0xaa, 0x62, 0x54, 0xff, 0x01,
+	0xfd, 0x7e, 0xa7, 0xef, 0xbd, 0xbf, 0xd3, 0xd1, 0x87, 0x3b, 0x1d, 0x7d, 0xbf, 0xd0, 0xd1, 0xcf,
+	0x0b, 0x1d, 0xdd, 0x2e, 0x74, 0xf4, 0xdb, 0x42, 0x47, 0x7f, 0x2c, 0x74, 0xf4, 0xe7, 0x42, 0xdf,
+	0x7b, 0xcf, 0xf1, 0x77, 0x3a, 0xba, 0x7d, 0xa7, 0x23, 0x78, 0x38, 0xf0, 0xc7, 0x9b, 0x25, 0xd4,
+	0x55, 0xfe, 0x9f, 0x63, 0x73, 0xcb, 0x46, 0xdf, 0xa4, 0xf9, 0xd1, 0x0a, 0x3e, 0x20, 0xf4, 0x93,
+	0x94, 0x6a, 0xda, 0xf5, 0x5f, 0x24, 0xbd, 0x19, 0xc9, 0xed, 0x55, 0xc5, 0x2f, 0x5d, 0xcf, 0xbb,
+	0x60, 0xfe, 0x77, 0x8c, 0xbb, 0x05, 0x57, 0x8a, 0x88, 0x53, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff,
+	0xbc, 0x2a, 0x5e, 0x82, 0x2b, 0x07, 0x00, 0x00,
+}
+
 func (this *Type) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -2139,7 +2200,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2167,7 +2228,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2177,6 +2238,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2196,7 +2260,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2205,6 +2269,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2227,7 +2294,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2237,6 +2304,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2256,7 +2326,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2265,6 +2335,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2287,7 +2360,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2296,6 +2369,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2320,7 +2396,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Syntax |= (Syntax(b) & 0x7F) << shift
+				m.Syntax |= Syntax(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2334,6 +2410,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthType
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthType
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2362,7 +2441,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2390,7 +2469,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Kind |= (Field_Kind(b) & 0x7F) << shift
+				m.Kind |= Field_Kind(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2409,7 +2488,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Cardinality |= (Field_Cardinality(b) & 0x7F) << shift
+				m.Cardinality |= Field_Cardinality(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2428,7 +2507,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Number |= (int32(b) & 0x7F) << shift
+				m.Number |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2447,7 +2526,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2457,6 +2536,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2476,7 +2558,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2486,6 +2568,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2505,7 +2590,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.OneofIndex |= (int32(b) & 0x7F) << shift
+				m.OneofIndex |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2524,7 +2609,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2544,7 +2629,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2553,6 +2638,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2575,7 +2663,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2585,6 +2673,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2604,7 +2695,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2614,6 +2705,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2628,6 +2722,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthType
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthType
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2656,7 +2753,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2684,7 +2781,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2694,6 +2791,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2713,7 +2813,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2722,6 +2822,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2744,7 +2847,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2753,6 +2856,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2775,7 +2881,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2784,6 +2890,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2808,7 +2917,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Syntax |= (Syntax(b) & 0x7F) << shift
+				m.Syntax |= Syntax(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2822,6 +2931,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthType
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthType
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2850,7 +2962,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2878,7 +2990,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2888,6 +3000,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2907,7 +3022,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Number |= (int32(b) & 0x7F) << shift
+				m.Number |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2926,7 +3041,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2935,6 +3050,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2952,6 +3070,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthType
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthType
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2980,7 +3101,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -3008,7 +3129,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3018,6 +3139,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3037,7 +3161,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -3046,6 +3170,9 @@
 				return ErrInvalidLengthType
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthType
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3065,6 +3192,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthType
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthType
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -3132,10 +3262,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthType
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthType
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -3164,6 +3297,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthType
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -3182,62 +3318,3 @@
 	ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowType   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_type_0082d870c49329d7) }
-
-var fileDescriptor_type_0082d870c49329d7 = []byte{
-	// 840 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46,
-	0x14, 0xf6, 0x0a, 0x21, 0xa3, 0x87, 0xc1, 0x9b, 0x4d, 0x26, 0x51, 0x9c, 0x19, 0x95, 0xa1, 0x3d,
-	0x30, 0x39, 0xe0, 0x29, 0x78, 0x3c, 0xbd, 0x82, 0x91, 0x29, 0x63, 0x22, 0xa9, 0x8b, 0x68, 0xe2,
-	0x5e, 0x18, 0x0c, 0x72, 0x86, 0x44, 0xac, 0x18, 0x24, 0x5a, 0x73, 0xeb, 0x4c, 0xcf, 0xfd, 0x27,
-	0x7a, 0xea, 0xf4, 0xdc, 0x3f, 0xc2, 0xc7, 0x1e, 0x7b, 0xac, 0xc9, 0xa5, 0xc7, 0x1c, 0x73, 0x6b,
-	0x67, 0x57, 0x20, 0x8b, 0x1f, 0x9d, 0x49, 0xdb, 0x1b, 0xef, 0xfb, 0xbe, 0xf7, 0x73, 0x9f, 0x1e,
-	0x70, 0xf4, 0xda, 0xf7, 0x5f, 0x7b, 0xee, 0xf1, 0x64, 0xea, 0x87, 0xfe, 0xd5, 0xec, 0xfa, 0x38,
-	0x9c, 0x4f, 0xdc, 0xb2, 0xb0, 0xc8, 0x61, 0xc4, 0x95, 0x57, 0xdc, 0xd1, 0xd3, 0x4d, 0x71, 0x9f,
-	0xcd, 0x23, 0xf6, 0xe8, 0xb3, 0x4d, 0x2a, 0xf0, 0x67, 0xd3, 0x81, 0xdb, 0x1b, 0xf8, 0x2c, 0x74,
-	0x6f, 0xc2, 0x48, 0x55, 0xfc, 0x51, 0x02, 0xd9, 0x99, 0x4f, 0x5c, 0x42, 0x40, 0x66, 0xfd, 0xb1,
-	0xab, 0xa1, 0x02, 0x2a, 0xa9, 0x54, 0xfc, 0x26, 0x65, 0x50, 0xae, 0x47, 0xae, 0x37, 0x0c, 0x34,
-	0xa9, 0x90, 0x2a, 0x65, 0x2b, 0x8f, 0xcb, 0x1b, 0xf9, 0xcb, 0xe7, 0x9c, 0xa6, 0x4b, 0x15, 0x79,
-	0x0c, 0x8a, 0xcf, 0x5c, 0xff, 0x3a, 0xd0, 0x52, 0x85, 0x54, 0x49, 0xa5, 0x4b, 0x8b, 0x7c, 0x0e,
-	0xfb, 0xfe, 0x24, 0x1c, 0xf9, 0x2c, 0xd0, 0x64, 0x11, 0xe8, 0xc9, 0x56, 0x20, 0x4b, 0xf0, 0x74,
-	0xa5, 0x23, 0x06, 0xe4, 0xd7, 0xeb, 0xd5, 0xd2, 0x05, 0x54, 0xca, 0x56, 0xf4, 0x2d, 0xcf, 0x8e,
-	0x90, 0x9d, 0x45, 0x2a, 0x9a, 0x0b, 0x92, 0x26, 0x39, 0x06, 0x25, 0x98, 0xb3, 0xb0, 0x7f, 0xa3,
-	0x29, 0x05, 0x54, 0xca, 0xef, 0x48, 0xdc, 0x11, 0x34, 0x5d, 0xca, 0x8a, 0xbf, 0x2a, 0x90, 0x16,
-	0x4d, 0x91, 0x63, 0x90, 0xdf, 0x8e, 0xd8, 0x50, 0x0c, 0x24, 0x5f, 0x79, 0xb6, 0xbb, 0xf5, 0xf2,
-	0xc5, 0x88, 0x0d, 0xa9, 0x10, 0x92, 0x06, 0x64, 0x07, 0xfd, 0xe9, 0x70, 0xc4, 0xfa, 0xde, 0x28,
-	0x9c, 0x6b, 0x92, 0xf0, 0x2b, 0xfe, 0x83, 0xdf, 0xd9, 0xbd, 0x92, 0x26, 0xdd, 0xf8, 0x0c, 0xd9,
-	0x6c, 0x7c, 0xe5, 0x4e, 0xb5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0xc5, 0xef, 0x23, 0x27, 0xde,
-	0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x4f, 0xf4, 0xa7, 0xd2, 0x7d, 0x6e, 0x77, 0xa7,
-	0x1e, 0xf9, 0x04, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0xba, 0x37, 0xda, 0xbe, 0x88, 0x05, 0x02,
-	0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0xdd, 0xa1, 0x96, 0x29, 0xa0, 0x52, 0x86, 0x2e,
-	0xad, 0xe4, 0x5b, 0xa9, 0x1f, 0xf9, 0x56, 0xcf, 0x40, 0x7d, 0x13, 0xf8, 0xac, 0x27, 0xea, 0x03,
-	0x51, 0x47, 0x86, 0x03, 0x26, 0xaf, 0xf1, 0x53, 0xc8, 0x0d, 0xdd, 0xeb, 0xfe, 0xcc, 0x0b, 0x7b,
-	0xdf, 0xf6, 0xbd, 0x99, 0xab, 0x65, 0x85, 0xe0, 0x60, 0x09, 0x7e, 0xcd, 0xb1, 0xe2, 0xad, 0x04,
-	0x32, 0x9f, 0x24, 0xc1, 0x70, 0xe0, 0x5c, 0xda, 0x46, 0xaf, 0x6b, 0x5e, 0x98, 0xd6, 0x4b, 0x13,
-	0xef, 0x91, 0x43, 0xc8, 0x0a, 0xa4, 0x61, 0x75, 0xeb, 0x6d, 0x03, 0x23, 0x92, 0x07, 0x10, 0xc0,
-	0x79, 0xdb, 0xaa, 0x39, 0x58, 0x8a, 0xed, 0x96, 0xe9, 0x9c, 0x9e, 0xe0, 0x54, 0xec, 0xd0, 0x8d,
-	0x00, 0x39, 0x29, 0xa8, 0x56, 0x70, 0x3a, 0xce, 0x71, 0xde, 0x7a, 0x65, 0x34, 0x4e, 0x4f, 0xb0,
-	0xb2, 0x8e, 0x54, 0x2b, 0x78, 0x9f, 0xe4, 0x40, 0x15, 0x48, 0xdd, 0xb2, 0xda, 0x38, 0x13, 0xc7,
-	0xec, 0x38, 0xb4, 0x65, 0x36, 0xb1, 0x1a, 0xc7, 0x6c, 0x52, 0xab, 0x6b, 0x63, 0x88, 0x23, 0xbc,
-	0x30, 0x3a, 0x9d, 0x5a, 0xd3, 0xc0, 0xd9, 0x58, 0x51, 0xbf, 0x74, 0x8c, 0x0e, 0x3e, 0x58, 0x2b,
-	0xab, 0x5a, 0xc1, 0xb9, 0x38, 0x85, 0x61, 0x76, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x28, 0xc5,
-	0xaa, 0x88, 0xc3, 0x0d, 0xe8, 0xf4, 0x04, 0xe3, 0xfb, 0x42, 0xa2, 0x28, 0x0f, 0xd6, 0x80, 0xd3,
-	0x13, 0x4c, 0x8a, 0x21, 0x64, 0x13, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xb3, 0x1a, 0x6d, 0xb4, 0xcc,
-	0x5a, 0xbb, 0xe5, 0x5c, 0x26, 0xe6, 0xaa, 0xc1, 0xa3, 0x24, 0x61, 0xd9, 0x4e, 0xcb, 0x32, 0x6b,
-	0x6d, 0x8c, 0x36, 0x19, 0x6a, 0x7c, 0xd5, 0x6d, 0x51, 0xa3, 0x81, 0xa5, 0x6d, 0xc6, 0x36, 0x6a,
-	0x8e, 0xd1, 0xc0, 0xa9, 0xe2, 0x5f, 0x08, 0x64, 0x83, 0xcd, 0xc6, 0x3b, 0xcf, 0xc8, 0x17, 0xa0,
-	0xba, 0x6c, 0x36, 0x8e, 0x9e, 0x3f, 0xba, 0x24, 0x47, 0x5b, 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0xd0,
-	0x7b, 0x71, 0x72, 0x19, 0x53, 0xff, 0xf9, 0x70, 0xc8, 0xff, 0xef, 0x70, 0xa4, 0x3f, 0xee, 0x70,
-	0xbc, 0x01, 0x35, 0x6e, 0x61, 0xe7, 0x14, 0xee, 0x3f, 0x6c, 0x69, 0xed, 0xc3, 0xfe, 0xf7, 0x3d,
-	0x16, 0xbf, 0x04, 0x25, 0x82, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6,
-	0xc2, 0xd5, 0xd8, 0x9c, 0x46, 0x92, 0xe7, 0x65, 0x50, 0xa2, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x4d,
-	0xa7, 0xf6, 0xaa, 0x67, 0x53, 0xcb, 0xb1, 0x2a, 0x78, 0x6f, 0x13, 0xaa, 0x62, 0x54, 0xff, 0x01,
-	0xfd, 0x7e, 0xa7, 0xef, 0xbd, 0xbf, 0xd3, 0xd1, 0x87, 0x3b, 0x1d, 0x7d, 0xbf, 0xd0, 0xd1, 0xcf,
-	0x0b, 0x1d, 0xdd, 0x2e, 0x74, 0xf4, 0xdb, 0x42, 0x47, 0x7f, 0x2c, 0x74, 0xf4, 0xe7, 0x42, 0xdf,
-	0x7b, 0xcf, 0xf1, 0x77, 0x3a, 0xba, 0x7d, 0xa7, 0x23, 0x78, 0x38, 0xf0, 0xc7, 0x9b, 0x25, 0xd4,
-	0x55, 0xfe, 0x9f, 0x63, 0x73, 0xcb, 0x46, 0xdf, 0xa4, 0xf9, 0xd1, 0x0a, 0x3e, 0x20, 0xf4, 0x93,
-	0x94, 0x6a, 0xda, 0xf5, 0x5f, 0x24, 0xbd, 0x19, 0xc9, 0xed, 0x55, 0xc5, 0x2f, 0x5d, 0xcf, 0xbb,
-	0x60, 0xfe, 0x77, 0x8c, 0xbb, 0x05, 0x57, 0x8a, 0x88, 0x53, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff,
-	0xbc, 0x2a, 0x5e, 0x82, 0x2b, 0x07, 0x00, 0x00,
-}
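
Both regenerated gogo/protobuf files in this diff (type.pb.go above and wrappers.pb.go below) apply the same hardening to their hand-rolled unmarshal loops: each varint byte is masked before it is widened (`uint64(b&0x7F)`), and every index derived from a decoded length is checked for overflow into negative values before it is compared against the buffer length. A minimal, self-contained sketch of that pattern, with hypothetical helper names rather than the generated API:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

var errInvalidLength = errors.New("proto: negative length found during unmarshaling")
var errIntOverflow = errors.New("proto: integer overflow")

// readUvarint mirrors the generated decode loops: mask each byte before
// widening it, and cap the shift so a malformed stream cannot overflow.
func readUvarint(data []byte, idx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if idx >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[idx]
		idx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, idx, nil
		}
	}
}

// readBytes reads a length-delimited field. The post-index is checked for
// wrap-around (< 0 after the addition) before being compared to len(data),
// which is the extra guard the regenerated code introduces.
func readBytes(data []byte, idx int) ([]byte, int, error) {
	n, idx, err := readUvarint(data, idx)
	if err != nil {
		return nil, 0, err
	}
	length := int(n)
	if length < 0 {
		return nil, 0, errInvalidLength
	}
	post := idx + length
	if post < 0 {
		return nil, 0, errInvalidLength
	}
	if post > len(data) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return data[idx:post], post, nil
}

func main() {
	// 0x03 "abc": a three-byte length-delimited payload.
	buf := []byte{0x03, 'a', 'b', 'c'}
	payload, _, err := readBytes(buf, 0)
	fmt.Println(string(payload), err) // abc <nil>
}
```

Without the `post < 0` check, a decoded length near the maximum int value could wrap the addition to a negative number and slip past the `post > len(data)` bound.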
diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go
index 7516cc9..5ade933 100644
--- a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go
+++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go
@@ -3,18 +3,16 @@
 
 package types
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
-import encoding_binary "encoding/binary"
-
-import io "io"
+import (
+	bytes "bytes"
+	encoding_binary "encoding/binary"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -41,7 +39,7 @@
 func (m *DoubleValue) Reset()      { *m = DoubleValue{} }
 func (*DoubleValue) ProtoMessage() {}
 func (*DoubleValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{0}
+	return fileDescriptor_5377b62bda767935, []int{0}
 }
 func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
 func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
@@ -59,8 +57,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *DoubleValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DoubleValue.Merge(dst, src)
+func (m *DoubleValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DoubleValue.Merge(m, src)
 }
 func (m *DoubleValue) XXX_Size() int {
 	return m.Size()
@@ -96,7 +94,7 @@
 func (m *FloatValue) Reset()      { *m = FloatValue{} }
 func (*FloatValue) ProtoMessage() {}
 func (*FloatValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{1}
+	return fileDescriptor_5377b62bda767935, []int{1}
 }
 func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
 func (m *FloatValue) XXX_Unmarshal(b []byte) error {
@@ -114,8 +112,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *FloatValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FloatValue.Merge(dst, src)
+func (m *FloatValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FloatValue.Merge(m, src)
 }
 func (m *FloatValue) XXX_Size() int {
 	return m.Size()
@@ -151,7 +149,7 @@
 func (m *Int64Value) Reset()      { *m = Int64Value{} }
 func (*Int64Value) ProtoMessage() {}
 func (*Int64Value) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{2}
+	return fileDescriptor_5377b62bda767935, []int{2}
 }
 func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
 func (m *Int64Value) XXX_Unmarshal(b []byte) error {
@@ -169,8 +167,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Int64Value) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Int64Value.Merge(dst, src)
+func (m *Int64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int64Value.Merge(m, src)
 }
 func (m *Int64Value) XXX_Size() int {
 	return m.Size()
@@ -206,7 +204,7 @@
 func (m *UInt64Value) Reset()      { *m = UInt64Value{} }
 func (*UInt64Value) ProtoMessage() {}
 func (*UInt64Value) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{3}
+	return fileDescriptor_5377b62bda767935, []int{3}
 }
 func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
 func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
@@ -224,8 +222,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *UInt64Value) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UInt64Value.Merge(dst, src)
+func (m *UInt64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt64Value.Merge(m, src)
 }
 func (m *UInt64Value) XXX_Size() int {
 	return m.Size()
@@ -261,7 +259,7 @@
 func (m *Int32Value) Reset()      { *m = Int32Value{} }
 func (*Int32Value) ProtoMessage() {}
 func (*Int32Value) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{4}
+	return fileDescriptor_5377b62bda767935, []int{4}
 }
 func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
 func (m *Int32Value) XXX_Unmarshal(b []byte) error {
@@ -279,8 +277,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *Int32Value) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Int32Value.Merge(dst, src)
+func (m *Int32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int32Value.Merge(m, src)
 }
 func (m *Int32Value) XXX_Size() int {
 	return m.Size()
@@ -316,7 +314,7 @@
 func (m *UInt32Value) Reset()      { *m = UInt32Value{} }
 func (*UInt32Value) ProtoMessage() {}
 func (*UInt32Value) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{5}
+	return fileDescriptor_5377b62bda767935, []int{5}
 }
 func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
 func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
@@ -334,8 +332,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *UInt32Value) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UInt32Value.Merge(dst, src)
+func (m *UInt32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt32Value.Merge(m, src)
 }
 func (m *UInt32Value) XXX_Size() int {
 	return m.Size()
@@ -371,7 +369,7 @@
 func (m *BoolValue) Reset()      { *m = BoolValue{} }
 func (*BoolValue) ProtoMessage() {}
 func (*BoolValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{6}
+	return fileDescriptor_5377b62bda767935, []int{6}
 }
 func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
 func (m *BoolValue) XXX_Unmarshal(b []byte) error {
@@ -389,8 +387,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *BoolValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BoolValue.Merge(dst, src)
+func (m *BoolValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BoolValue.Merge(m, src)
 }
 func (m *BoolValue) XXX_Size() int {
 	return m.Size()
@@ -426,7 +424,7 @@
 func (m *StringValue) Reset()      { *m = StringValue{} }
 func (*StringValue) ProtoMessage() {}
 func (*StringValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{7}
+	return fileDescriptor_5377b62bda767935, []int{7}
 }
 func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
 func (m *StringValue) XXX_Unmarshal(b []byte) error {
@@ -444,8 +442,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *StringValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StringValue.Merge(dst, src)
+func (m *StringValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StringValue.Merge(m, src)
 }
 func (m *StringValue) XXX_Size() int {
 	return m.Size()
@@ -481,7 +479,7 @@
 func (m *BytesValue) Reset()      { *m = BytesValue{} }
 func (*BytesValue) ProtoMessage() {}
 func (*BytesValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_wrappers_c5239a825c7dfb53, []int{8}
+	return fileDescriptor_5377b62bda767935, []int{8}
 }
 func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
 func (m *BytesValue) XXX_Unmarshal(b []byte) error {
@@ -499,8 +497,8 @@
 		return b[:n], nil
 	}
 }
-func (dst *BytesValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_BytesValue.Merge(dst, src)
+func (m *BytesValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BytesValue.Merge(m, src)
 }
 func (m *BytesValue) XXX_Size() int {
 	return m.Size()
@@ -532,6 +530,31 @@
 	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
 	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
 }
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) }
+
+var fileDescriptor_5377b62bda767935 = []byte{
+	// 285 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+	0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+	0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+	0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+	0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+	0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+	0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+	0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+	0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b,
+	0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48,
+	0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0,
+	0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb,
+	0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 0x6f, 0x38, 0x34, 0xbe, 0x02,
+	0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31,
+	0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f,
+	0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c,
+	0x08, 0x00, 0x00, 0xff, 0xff, 0x31, 0x55, 0x64, 0x90, 0x0a, 0x02, 0x00, 0x00,
+}
+
 func (this *DoubleValue) Compare(that interface{}) int {
 	if that == nil {
 		if this == nil {
@@ -1914,7 +1937,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1948,6 +1971,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1976,7 +2002,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2010,6 +2036,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2038,7 +2067,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2066,7 +2095,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Value |= (int64(b) & 0x7F) << shift
+				m.Value |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2080,6 +2109,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2108,7 +2140,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2136,7 +2168,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Value |= (uint64(b) & 0x7F) << shift
+				m.Value |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2150,6 +2182,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2178,7 +2213,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2206,7 +2241,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Value |= (int32(b) & 0x7F) << shift
+				m.Value |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2220,6 +2255,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2248,7 +2286,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2276,7 +2314,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Value |= (uint32(b) & 0x7F) << shift
+				m.Value |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2290,6 +2328,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2318,7 +2359,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2346,7 +2387,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				v |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2361,6 +2402,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2389,7 +2433,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2417,7 +2461,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2427,6 +2471,9 @@
 				return ErrInvalidLengthWrappers
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2441,6 +2488,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2469,7 +2519,7 @@
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -2497,7 +2547,7 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -2506,6 +2556,9 @@
 				return ErrInvalidLengthWrappers
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2523,6 +2576,9 @@
 			if skippy < 0 {
 				return ErrInvalidLengthWrappers
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -2590,10 +2646,13 @@
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthWrappers
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthWrappers
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -2622,6 +2681,9 @@
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthWrappers
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -2640,29 +2702,3 @@
 	ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowWrappers   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_c5239a825c7dfb53)
-}
-
-var fileDescriptor_wrappers_c5239a825c7dfb53 = []byte{
-	// 285 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
-	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
-	0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
-	0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
-	0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
-	0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
-	0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
-	0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
-	0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
-	0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b,
-	0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48,
-	0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0,
-	0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb,
-	0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 0x6f, 0x38, 0x34, 0xbe, 0x02,
-	0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31,
-	0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f,
-	0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c,
-	0x08, 0x00, 0x00, 0xff, 0xff, 0x31, 0x55, 0x64, 0x90, 0x0a, 0x02, 0x00, 0x00,
-}
diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go
index e45a233..8e2d1d4 100644
--- a/vendor/github.com/moby/buildkit/client/llb/exec.go
+++ b/vendor/github.com/moby/buildkit/client/llb/exec.go
@@ -177,7 +177,7 @@
 		addCap(&e.constraints, pb.CapExecMetaNetwork)
 	}
 
-	if e.meta.Security != SecurityModeInsecure {
+	if e.meta.Security != SecurityModeSandbox {
 		addCap(&e.constraints, pb.CapExecMetaSecurity)
 	}
 
diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go
index 830c018..17b3810 100644
--- a/vendor/github.com/moby/buildkit/client/solve.go
+++ b/vendor/github.com/moby/buildkit/client/solve.go
@@ -410,9 +410,6 @@
 			if csDir == "" {
 				return nil, errors.New("local cache importer requires src")
 			}
-			if err := os.MkdirAll(csDir, 0755); err != nil {
-				return nil, err
-			}
 			cs, err := contentlocal.NewStore(csDir)
 			if err != nil {
 				return nil, err
diff --git a/vendor/github.com/moby/buildkit/control/gateway/gateway.go b/vendor/github.com/moby/buildkit/control/gateway/gateway.go
index 074e739..29dc2b3 100644
--- a/vendor/github.com/moby/buildkit/control/gateway/gateway.go
+++ b/vendor/github.com/moby/buildkit/control/gateway/gateway.go
@@ -63,7 +63,9 @@
 
 	go func() {
 		<-ctx.Done()
+		gwf.mu.Lock()
 		gwf.updateCond.Broadcast()
+		gwf.mu.Unlock()
 	}()
 
 	gwf.mu.RLock()
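
This gateway.go hunk, and the matching ones in session/manager.go, solver/jobs.go and util/progress/progress.go further down, move the cancellation-path `Broadcast` under the lock that the waiters hold. The reasoning, as far as it can be read from the change: a waiter checks its state and calls `Wait` while holding that lock, so a `Broadcast` issued without it can fire in the window before `Wait` has parked the goroutine and be lost, leaving the waiter asleep even though the context is already done. A standard-library sketch of the pattern, with illustrative names:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitForUpdate blocks until an update arrives or ctx is cancelled. The
// waiter inspects its state and calls Wait while holding the lock, so the
// cancel path must also Broadcast under the same lock: an unlocked
// Broadcast can land between the state check and the point where Wait
// parks the goroutine, and that wake-up is lost.
func waitForUpdate(ctx context.Context, cond *sync.Cond, updated *bool) error {
	// Cancel-aware wake-up, mirroring the fix in the hunks: lock, then
	// Broadcast, then unlock.
	go func() {
		<-ctx.Done()
		cond.L.Lock()
		cond.Broadcast()
		cond.L.Unlock()
	}()

	cond.L.Lock()
	defer cond.L.Unlock()
	for !*updated {
		if err := ctx.Err(); err != nil {
			return err
		}
		cond.Wait() // atomically releases the lock, re-acquires it on wake-up
	}
	return nil
}

func main() {
	cond := sync.NewCond(&sync.Mutex{})
	updated := false

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// No update ever arrives, so the waiter returns the context error
	// instead of blocking forever.
	fmt.Println(waitForUpdate(ctx, cond, &updated))
}
```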
diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go
index 08b0ee2..0d12a18 100644
--- a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go
+++ b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go
@@ -95,6 +95,23 @@
 		Options:     []string{"ro", "nosuid", "noexec", "nodev"},
 	})
 
+	if processMode == NoProcessSandbox {
+		var maskedPaths []string
+		for _, s := range s.Linux.MaskedPaths {
+			if !hasPrefix(s, "/proc") {
+				maskedPaths = append(maskedPaths, s)
+			}
+		}
+		s.Linux.MaskedPaths = maskedPaths
+		var readonlyPaths []string
+		for _, s := range s.Linux.ReadonlyPaths {
+			if !hasPrefix(s, "/proc") {
+				readonlyPaths = append(readonlyPaths, s)
+			}
+		}
+		s.Linux.ReadonlyPaths = readonlyPaths
+	}
+
 	if meta.SecurityMode == pb.SecurityMode_INSECURE {
 		//make sysfs rw mount for insecure mode.
 		for _, m := range s.Mounts {
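
The spec_unix.go hunk above strips every `/proc` entry from the OCI `MaskedPaths` and `ReadonlyPaths` lists when the executor runs in `NoProcessSandbox` mode (which, as the name suggests, skips the usual process isolation, so masking paths under the shared `/proc` is not wanted). A tiny stand-in for that filter; the file's own `hasPrefix` helper is assumed here to behave like a path-aware `strings.HasPrefix`:

```go
package main

import (
	"fmt"
	"strings"
)

// dropProcPaths removes every entry under /proc from an OCI path list,
// keeping everything else in its original order.
func dropProcPaths(paths []string) []string {
	kept := make([]string, 0, len(paths))
	for _, p := range paths {
		if !strings.HasPrefix(p, "/proc") {
			kept = append(kept, p)
		}
	}
	return kept
}

func main() {
	masked := []string{"/proc/kcore", "/proc/keys", "/sys/firmware"}
	fmt.Println(dropProcPaths(masked)) // [/sys/firmware]
}
```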
diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
index 2fe0c51..ceca9d8 100644
--- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
+++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
@@ -41,6 +41,8 @@
 	// ProcessMode
 	ProcessMode     oci.ProcessMode
 	IdentityMapping *idtools.IdentityMapping
+	// runc run --no-pivot (not recommended)
+	NoPivot bool
 }
 
 var defaultCommandCandidates = []string{"buildkit-runc", "runc"}
@@ -54,6 +56,7 @@
 	networkProviders map[pb.NetMode]network.Provider
 	processMode      oci.ProcessMode
 	idmap            *idtools.IdentityMapping
+	noPivot          bool
 }
 
 func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) {
@@ -111,6 +114,7 @@
 		networkProviders: networkProviders,
 		processMode:      opt.ProcessMode,
 		idmap:            opt.IdentityMapping,
+		noPivot:          opt.NoPivot,
 	}
 	return w, nil
 }
@@ -193,6 +197,17 @@
 		opts = append(opts, containerdoci.WithRootFSReadonly())
 	}
 
+	identity = idtools.Identity{
+		UID: int(uid),
+		GID: int(gid),
+	}
+	if w.idmap != nil {
+		identity, err = w.idmap.ToHost(identity)
+		if err != nil {
+			return err
+		}
+	}
+
 	if w.cgroupParent != "" {
 		var cgroupsPath string
 		lastSeparator := w.cgroupParent[len(w.cgroupParent)-1:]
@@ -269,7 +284,8 @@
 
 	logrus.Debugf("> creating %s %v", id, meta.Args)
 	status, err := w.runc.Run(runCtx, id, bundle, &runc.CreateOpts{
-		IO: &forwardIO{stdin: stdin, stdout: stdout, stderr: stderr},
+		IO:      &forwardIO{stdin: stdin, stdout: stdout, stderr: stderr},
+		NoPivot: w.noPivot,
 	})
 	close(done)
 	if err != nil {
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
index 76777ee..6af3bab 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
@@ -8,6 +8,7 @@
 	"encoding/json"
 	"fmt"
 	"net"
+	"path"
 	"regexp"
 	"strconv"
 	"strings"
@@ -46,6 +47,7 @@
 	keyOverrideCopyImage       = "override-copy-image" // remove after CopyOp implemented
 	keyNameContext             = "contextkey"
 	keyNameDockerfile          = "dockerfilekey"
+	keyContextSubDir           = "contextsubdir"
 )
 
 var httpPrefix = regexp.MustCompile("^https?://")
@@ -122,6 +124,8 @@
 		dockerfile2llb.WithInternalName(name),
 	)
 
+	fileop := useFileOp(opts, &caps)
+
 	var buildContext *llb.State
 	isScratchContext := false
 	if st, ok := detectGitContext(opts[localNameContext]); ok {
@@ -157,7 +161,6 @@
 			return nil, errors.Errorf("failed to read downloaded context")
 		}
 		if isArchive(dt) {
-			fileop := useFileOp(opts, &caps)
 			if fileop {
 				bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{
 					AttemptUnpack: true,
@@ -190,6 +193,12 @@
 		}
 	}
 
+	if buildContext != nil {
+		if sub, ok := opts[keyContextSubDir]; ok {
+			buildContext = scopeToSubDir(buildContext, fileop, sub)
+		}
+	}
+
 	def, err := src.Marshal(marshalOpts...)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to marshal local source")
@@ -561,3 +570,17 @@
 	}
 	return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil
 }
+
+func scopeToSubDir(c *llb.State, fileop bool, dir string) *llb.State {
+	if fileop {
+		bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{
+			CopyDirContentsOnly: true,
+		}))
+		return &bc
+	}
+	unpack := llb.Image(dockerfile2llb.DefaultCopyImage, dockerfile2llb.WithInternalName("helper image for file operations")).
+		Run(llb.Shlexf("copy %s/. /out/", path.Join("/src", dir)), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("filtering build context"))
+	unpack.AddMount("/src", *c, llb.Readonly)
+	bc := unpack.AddMount("/out", llb.Scratch())
+	return &bc
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
index 0527923..f368fe6 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
@@ -172,10 +172,6 @@
 		}
 	}
 
-	if len(allDispatchStates.states) == 1 {
-		allDispatchStates.states[0].stageName = ""
-	}
-
 	var target *dispatchState
 	if opt.Target == "" {
 		target = allDispatchStates.lastTarget()
@@ -207,6 +203,14 @@
 		}
 	}
 
+	if has, state := hasCircularDependency(allDispatchStates.states); has {
+		return nil, nil, fmt.Errorf("circular dependency detected on stage: %s", state.stageName)
+	}
+
+	if len(allDispatchStates.states) == 1 {
+		allDispatchStates.states[0].stageName = ""
+	}
+
 	eg, ctx := errgroup.WithContext(ctx)
 	for i, d := range allDispatchStates.states {
 		reachable := isReachable(target, d)
@@ -1130,6 +1134,41 @@
 	return false
 }
 
+func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) {
+	var visit func(state *dispatchState) bool
+	if states == nil {
+		return false, nil
+	}
+	visited := make(map[*dispatchState]struct{})
+	path := make(map[*dispatchState]struct{})
+
+	visit = func(state *dispatchState) bool {
+		_, ok := visited[state]
+		if ok {
+			return false
+		}
+		visited[state] = struct{}{}
+		path[state] = struct{}{}
+		for dep := range state.deps {
+			_, ok = path[dep]
+			if ok {
+				return true
+			}
+			if visit(dep) {
+				return true
+			}
+		}
+		delete(path, state)
+		return false
+	}
+	for _, state := range states {
+		if visit(state) {
+			return true, state
+		}
+	}
+	return false, nil
+}
+
 func parseUser(str string) (uid uint32, gid uint32, err error) {
 	if str == "" {
 		return 0, 0, nil
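
The new `hasCircularDependency` above is a depth-first search with two sets: `visited` marks stages whose dependencies have already been fully explored, and `path` tracks the stages on the current recursion stack, so meeting a dependency that is already on the path means the stages form a loop. Because the check now runs before dispatch, a Dockerfile whose stages reference each other fails early with a clear error. The same bookkeeping on a plain string graph, as a standalone sketch with illustrative names:

```go
package main

import "fmt"

// findCycle reports whether the dependency graph contains a cycle, using the
// same technique as the check above: "visited" prevents re-exploring
// finished nodes, "onPath" tracks the current DFS stack, and a dependency
// that is already on the path closes a cycle.
func findCycle(deps map[string][]string) (bool, string) {
	visited := map[string]bool{}
	onPath := map[string]bool{}

	var visit func(n string) bool
	visit = func(n string) bool {
		if visited[n] {
			return false
		}
		visited[n] = true
		onPath[n] = true
		for _, d := range deps[n] {
			if onPath[d] || visit(d) {
				return true
			}
		}
		onPath[n] = false
		return false
	}

	for n := range deps {
		if visit(n) {
			return true, n
		}
	}
	return false, ""
}

func main() {
	// Stage "a" copies from "b" and "b" copies from "a": a circular build.
	stages := map[string][]string{
		"a": {"b"},
		"b": {"a"},
		"c": {"a"},
	}
	ok, from := findCycle(stages)
	fmt.Println(ok, from) // true, reached from "a", "b" or "c" (map order varies)
}
```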
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go
index 87e226a..fa9de6f 100644
--- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go
@@ -158,7 +158,7 @@
 		rootFS = workerRef.ImmutableRef
 	}
 
-	lbf, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers)
+	lbf, ctx, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers)
 	defer lbf.conn.Close()
 	if err != nil {
 		return nil, err
@@ -210,6 +210,9 @@
 	err = llbBridge.Exec(ctx, meta, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr)
 
 	if err != nil {
+		if errors.Cause(err) == context.Canceled && lbf.isErrServerClosed {
+			err = errors.Errorf("frontend grpc server closed unexpectedly")
+		}
 		// An existing error (set via Return rpc) takes
 		// precedence over this error, which in turn takes
 		// precedence over a success reported via Return.
@@ -294,15 +297,24 @@
 	return lbf
 }
 
-func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos) (*llbBridgeForwarder, error) {
+func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos) (*llbBridgeForwarder, context.Context, error) {
+	ctx, cancel := context.WithCancel(ctx)
 	lbf := NewBridgeForwarder(ctx, llbBridge, workers)
 	server := grpc.NewServer()
 	grpc_health_v1.RegisterHealthServer(server, health.NewServer())
 	pb.RegisterLLBBridgeServer(server, lbf)
 
-	go serve(ctx, server, lbf.conn)
+	go func() {
+		serve(ctx, server, lbf.conn)
+		select {
+		case <-ctx.Done():
+		default:
+			lbf.isErrServerClosed = true
+		}
+		cancel()
+	}()
 
-	return lbf, nil
+	return lbf, ctx, nil
 }
 
 type pipe struct {
@@ -372,11 +384,12 @@
 	// lastRef      solver.CachedResult
 	// lastRefs     map[string]solver.CachedResult
 	// err          error
-	doneCh       chan struct{} // closed when result or err become valid through a call to a Return
-	result       *frontend.Result
-	err          error
-	exporterAttr map[string][]byte
-	workers      frontend.WorkerInfos
+	doneCh            chan struct{} // closed when result or err become valid through a call to a Return
+	result            *frontend.Result
+	err               error
+	exporterAttr      map[string][]byte
+	workers           frontend.WorkerInfos
+	isErrServerClosed bool
 	*pipe
 }
 
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go
index c9f4373..b39b280 100644
--- a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go
@@ -28,6 +28,8 @@
 }
 
 func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
 	resp, err := c.Ping(ctx, &pb.PingRequest{})
 	if err != nil {
 		return nil, err
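
The grpcclient hunk bounds the frontend's initial `Ping` with a five-second `context.WithTimeout`, so a gateway that never answers the handshake fails fast instead of blocking for as long as the parent context lives. The same guard in isolation, standard library only; the channel stands in for the RPC:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// ping stands in for the gateway handshake: it only returns once the
// "server" replies or the context is cancelled.
func ping(ctx context.Context, reply <-chan struct{}) error {
	select {
	case <-reply:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Bound the handshake the same way the hunk does: derive a context with
	// a deadline and always cancel it to release the timer.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// Nothing ever replies, so the call fails fast instead of hanging.
	err := ping(ctx, make(chan struct{}))
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```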
diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go
index 151ab54..2486563 100644
--- a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go
+++ b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go
@@ -46,6 +46,7 @@
 
 	closedOnce sync.Once
 	readMu     sync.Mutex
+	writeMu    sync.Mutex
 	err        error
 	closeCh    chan struct{}
 }
@@ -79,6 +80,8 @@
 }
 
 func (c *conn) Write(b []byte) (int, error) {
+	c.writeMu.Lock()
+	defer c.writeMu.Unlock()
 	m := &controlapi.BytesMessage{Data: b}
 	if err := c.stream.SendMsg(m); err != nil {
 		return 0, err
@@ -93,7 +96,9 @@
 		}()
 
 		if cs, ok := c.stream.(grpc.ClientStream); ok {
+			c.writeMu.Lock()
 			err = cs.CloseSend()
+			c.writeMu.Unlock()
 			if err != nil {
 				return
 			}
@@ -106,6 +111,7 @@
 			err = c.stream.RecvMsg(m)
 			if err != nil {
 				if err != io.EOF {
+					c.readMu.Unlock()
 					return
 				}
 				err = nil
diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go
index f401c7f..e01b047 100644
--- a/vendor/github.com/moby/buildkit/session/manager.go
+++ b/vendor/github.com/moby/buildkit/session/manager.go
@@ -162,7 +162,9 @@
 	go func() {
 		select {
 		case <-ctx.Done():
+			sm.mu.Lock()
 			sm.updateCondition.Broadcast()
+			sm.mu.Unlock()
 		}
 	}()
 
diff --git a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go
index 19155dc..b712f18 100644
--- a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go
+++ b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go
@@ -18,6 +18,7 @@
 }
 
 type SnapshotterBase interface {
+	Name() string
 	Mounts(ctx context.Context, key string) (Mountable, error)
 	Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error
 	View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error)
@@ -43,15 +44,20 @@
 	SetBlob(ctx context.Context, key string, diffID, blob digest.Digest) error
 }
 
-func FromContainerdSnapshotter(s snapshots.Snapshotter, idmap *idtools.IdentityMapping) SnapshotterBase {
-	return &fromContainerd{Snapshotter: s, idmap: idmap}
+func FromContainerdSnapshotter(name string, s snapshots.Snapshotter, idmap *idtools.IdentityMapping) SnapshotterBase {
+	return &fromContainerd{name: name, Snapshotter: s, idmap: idmap}
 }
 
 type fromContainerd struct {
+	name string
 	snapshots.Snapshotter
 	idmap *idtools.IdentityMapping
 }
 
+func (s *fromContainerd) Name() string {
+	return s.name
+}
+
 func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, error) {
 	mounts, err := s.Snapshotter.Mounts(ctx, key)
 	if err != nil {
diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go
index 72c605f..99c7c8b 100644
--- a/vendor/github.com/moby/buildkit/solver/jobs.go
+++ b/vendor/github.com/moby/buildkit/solver/jobs.go
@@ -404,7 +404,9 @@
 
 	go func() {
 		<-ctx.Done()
+		jl.mu.Lock()
 		jl.updateCond.Broadcast()
+		jl.mu.Unlock()
 	}()
 
 	jl.mu.RLock()
diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go
index b802716..ffe3d88 100644
--- a/vendor/github.com/moby/buildkit/util/progress/progress.go
+++ b/vendor/github.com/moby/buildkit/util/progress/progress.go
@@ -101,7 +101,9 @@
 		select {
 		case <-done:
 		case <-ctx.Done():
+			pr.mu.Lock()
 			pr.cond.Broadcast()
+			pr.mu.Unlock()
 		}
 	}()
 	pr.mu.Lock()
@@ -163,7 +165,9 @@
 	pr.cond = sync.NewCond(&pr.mu)
 	go func() {
 		<-ctx.Done()
+		pr.mu.Lock()
 		pr.cond.Broadcast()
+		pr.mu.Unlock()
 	}()
 	pw := &progressWriter{
 		reader: pr,
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 789adfd..f5eec67 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -16,11 +16,11 @@
 Prerequisites
 -------------
 
-This requires Go 1.6 or later. Go 1.7 will be required soon.
+gRPC-Go requires Go 1.9 or later.
 
 Constraints
 -----------
-The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
+The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
 
 Documentation
 -------------
@@ -43,3 +43,25 @@
  - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
  - `go get -u google.golang.org/grpc`
  - `protoc --go_out=plugins=grpc:. *.proto`
+
+#### How to turn on logging
+
+The default logger is controlled by environment variables. Turn everything
+on by setting:
+
+```
+GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
+```
+
+#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+
+This error means the connection the RPC is using was closed, and there are many
+possible reasons, including:
+ 1. mis-configured transport credentials, connection failed on handshaking
+ 1. bytes disrupted, possibly by a proxy in between
+ 1. server shutdown
+
+It can be tricky to debug this because the error happens on the client side but
+the root cause of the connection being closed is on the server side. Turn on
+logging on __both client and server__, and see if there are any transport
+errors.
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index c40facc..97c6e25 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -16,81 +16,23 @@
  *
  */
 
+// See internal/backoff package for the backoff implementation. This file is
+// kept for the exported types and API backward compatibility.
+
 package grpc
 
 import (
-	"math/rand"
 	"time"
 )
 
 // DefaultBackoffConfig uses values specified for backoff in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
 var DefaultBackoffConfig = BackoffConfig{
-	MaxDelay:  120 * time.Second,
-	baseDelay: 1.0 * time.Second,
-	factor:    1.6,
-	jitter:    0.2,
-}
-
-// backoffStrategy defines the methodology for backing off after a grpc
-// connection failure.
-//
-// This is unexported until the gRPC project decides whether or not to allow
-// alternative backoff strategies. Once a decision is made, this type and its
-// method may be exported.
-type backoffStrategy interface {
-	// backoff returns the amount of time to wait before the next retry given
-	// the number of consecutive failures.
-	backoff(retries int) time.Duration
+	MaxDelay: 120 * time.Second,
 }
 
 // BackoffConfig defines the parameters for the default gRPC backoff strategy.
 type BackoffConfig struct {
 	// MaxDelay is the upper bound of backoff delay.
 	MaxDelay time.Duration
-
-	// TODO(stevvooe): The following fields are not exported, as allowing
-	// changes would violate the current gRPC specification for backoff. If
-	// gRPC decides to allow more interesting backoff strategies, these fields
-	// may be opened up in the future.
-
-	// baseDelay is the amount of time to wait before retrying after the first
-	// failure.
-	baseDelay time.Duration
-
-	// factor is applied to the backoff after each retry.
-	factor float64
-
-	// jitter provides a range to randomize backoff delays.
-	jitter float64
-}
-
-func setDefaults(bc *BackoffConfig) {
-	md := bc.MaxDelay
-	*bc = DefaultBackoffConfig
-
-	if md > 0 {
-		bc.MaxDelay = md
-	}
-}
-
-func (bc BackoffConfig) backoff(retries int) time.Duration {
-	if retries == 0 {
-		return bc.baseDelay
-	}
-	backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
-	for backoff < max && retries > 0 {
-		backoff *= bc.factor
-		retries--
-	}
-	if backoff > max {
-		backoff = max
-	}
-	// Randomize backoff delays so that if a cluster of requests start at
-	// the same time, they won't operate in lockstep.
-	backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
-	if backoff < 0 {
-		return 0
-	}
-	return time.Duration(backoff)
 }
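
Note that the backoff math is not being dropped here: per the new file comment it moves into gRPC's `internal/backoff` package, and the exported `BackoffConfig` keeps only the `MaxDelay` knob. For reference, the strategy the removed code implemented (exponential growth from a one-second base with a 1.6 factor, capped at `MaxDelay`, then randomized by ±20% jitter so retrying clients do not fall into lockstep) can be reproduced standalone; this is an illustration, not gRPC's internal implementation:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff reproduces the strategy of the removed code: exponential growth
// from baseDelay by factor, capped at maxDelay, then randomized by ±jitter
// so a burst of clients does not retry in lockstep.
func backoff(retries int) time.Duration {
	const (
		baseDelay = 1 * time.Second
		maxDelay  = 120 * time.Second
		factor    = 1.6
		jitter    = 0.2
	)
	if retries == 0 {
		return baseDelay
	}
	cur, max := float64(baseDelay), float64(maxDelay)
	for cur < max && retries > 0 {
		cur *= factor
		retries--
	}
	if cur > max {
		cur = max
	}
	cur *= 1 + jitter*(rand.Float64()*2-1)
	if cur < 0 {
		return 0
	}
	return time.Duration(cur)
}

func main() {
	for _, r := range []int{0, 1, 3, 10} {
		fmt.Printf("retry %2d -> %v\n", r, backoff(r).Round(time.Millisecond))
	}
}
```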
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
index e173016..a78e702 100644
--- a/vendor/google.golang.org/grpc/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -19,11 +19,10 @@
 package grpc
 
 import (
-	"fmt"
+	"context"
 	"net"
 	"sync"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
@@ -118,26 +117,6 @@
 	Close() error
 }
 
-// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
-// call of Balancer.
-type downErr struct {
-	timeout   bool
-	temporary bool
-	desc      string
-}
-
-func (e downErr) Error() string   { return e.desc }
-func (e downErr) Timeout() bool   { return e.timeout }
-func (e downErr) Temporary() bool { return e.temporary }
-
-func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
-	return downErr{
-		timeout:   timeout,
-		temporary: temporary,
-		desc:      fmt.Sprintf(format, a...),
-	}
-}
-
 // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
 // the name resolution updates and updates the addresses available correspondingly.
 //
@@ -410,7 +389,3 @@
 type pickFirst struct {
 	*roundRobin
 }
-
-func pickFirstBalancerV1(r naming.Resolver) Balancer {
-	return &pickFirst{&roundRobin{r: r}}
-}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 63b8d71..fafede2 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -21,13 +21,15 @@
 package balancer
 
 import (
+	"context"
 	"errors"
 	"net"
 	"strings"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -46,8 +48,20 @@
 	m[strings.ToLower(b.Name())] = b
 }
 
+// unregisterForTesting deletes the balancer with the given name from the
+// balancer map.
+//
+// This function is not thread-safe.
+func unregisterForTesting(name string) {
+	delete(m, name)
+}
+
+func init() {
+	internal.BalancerUnregister = unregisterForTesting
+}
+
 // Get returns the resolver builder registered with the given name.
-// Note that the compare is done in a case-insenstive fashion.
+// Note that the compare is done in a case-insensitive fashion.
 // If no builder is register with the name, nil will be returned.
 func Get(name string) Builder {
 	if b, ok := m[strings.ToLower(name)]; ok {
@@ -88,7 +102,15 @@
 }
 
 // NewSubConnOptions contains options to create new SubConn.
-type NewSubConnOptions struct{}
+type NewSubConnOptions struct {
+	// CredsBundle is the credentials bundle that will be used in the created
+	// SubConn. If it's nil, the original creds from grpc DialOptions will be
+	// used.
+	CredsBundle credentials.Bundle
+	// HealthCheckEnabled indicates whether health check service should be
+	// enabled on this SubConn
+	HealthCheckEnabled bool
+}
 
 // ClientConn represents a gRPC ClientConn.
 //
@@ -105,7 +127,7 @@
 	// The SubConn will be shutdown.
 	RemoveSubConn(SubConn)
 
-	// UpdateBalancerState is called by balancer to nofity gRPC that some internal
+	// UpdateBalancerState is called by balancer to notify gRPC that some internal
 	// state in balancer has changed.
 	//
 	// gRPC will update the connectivity state of the ClientConn, and will call pick
@@ -125,6 +147,8 @@
 	// use to dial to a remote load balancer server. The Balancer implementations
 	// can ignore this if it does not need to talk to another party securely.
 	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle that the Balancer can use.
+	CredsBundle credentials.Bundle
 	// Dialer is the custom dialer the Balancer implementation can use to dial
 	// to a remote load balancer server. The Balancer implementations
 	// can ignore this if it doesn't need to talk to remote balancer.
@@ -143,16 +167,27 @@
 }
 
 // PickOptions contains addition information for the Pick operation.
-type PickOptions struct{}
+type PickOptions struct {
+	// FullMethodName is the method name that NewClientStream() is called
+	// with. The canonical format is /service/Method.
+	FullMethodName string
+}
 
 // DoneInfo contains additional information for done.
 type DoneInfo struct {
 	// Err is the rpc error the RPC finished with. It could be nil.
 	Err error
+	// Trailer contains the metadata from the RPC's trailer, if present.
+	Trailer metadata.MD
 	// BytesSent indicates if any bytes have been sent to the server.
 	BytesSent bool
 	// BytesReceived indicates if any byte has been received from the server.
 	BytesReceived bool
+	// ServerLoad is the load received from server. It's usually sent as part of
+	// trailing metadata.
+	//
+	// The only supported type now is *orca_v1.LoadReport.
+	ServerLoad interface{}
 }
 
 var (
@@ -182,8 +217,10 @@
 	//
 	// If a SubConn is returned:
 	// - If it is READY, gRPC will send the RPC on it;
-	// - If it is not ready, or becomes not ready after it's returned, gRPC will block
-	//   until UpdateBalancerState() is called and will call pick on the new picker.
+	// - If it is not ready, or becomes not ready after it's returned, gRPC will
+	//   block until UpdateBalancerState() is called and will call pick on the
+	//   new picker. The done function returned from Pick(), if not nil, will be
+	//   called with nil error, no bytes sent and no bytes received.
 	//
 	// If the returned error is not nil:
 	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
@@ -194,9 +231,10 @@
 	// - Else (error is other non-nil error):
 	//   - The RPC will fail with unavailable error.
 	//
-	// The returned done() function will be called once the rpc has finished, with the
-	// final status of that RPC.
-	// done may be nil if balancer doesn't care about the RPC status.
+	// The returned done() function will be called once the rpc has finished,
+	// with the final status of that RPC.  If the SubConn returned is not a
+	// valid SubConn type, done may not be called.  done may be nil if balancer
+	// doesn't care about the RPC status.
 	Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
 }
 
@@ -215,14 +253,84 @@
 	// that back to gRPC.
 	// Balancer should also generate and update Pickers when its internal state has
 	// been changed by the new state.
+	//
+	// Deprecated: if V2Balancer is implemented by the Balancer,
+	// UpdateSubConnState will be called instead.
 	HandleSubConnStateChange(sc SubConn, state connectivity.State)
 	// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
 	// balancers.
 	// Balancer can create new SubConn or remove SubConn with the addresses.
 	// An empty address slice and a non-nil error will be passed if the resolver returns
 	// non-nil error to gRPC.
+	//
+	// Deprecated: if V2Balancer is implemented by the Balancer,
+	// UpdateResolverState will be called instead.
 	HandleResolvedAddrs([]resolver.Address, error)
 	// Close closes the balancer. The balancer is not required to call
 	// ClientConn.RemoveSubConn for its existing SubConns.
 	Close()
 }
+
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+	ConnectivityState connectivity.State
+	// TODO: add last connection error
+}
+
+// V2Balancer is defined for documentation purposes.  If a Balancer also
+// implements V2Balancer, its UpdateResolverState method will be called instead
+// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of
+// HandleSubConnStateChange.
+type V2Balancer interface {
+	// UpdateResolverState is called by gRPC when the state of the resolver
+	// changes.
+	UpdateResolverState(resolver.State)
+	// UpdateSubConnState is called by gRPC when the state of a SubConn
+	// changes.
+	UpdateSubConnState(SubConn, SubConnState)
+	// Close closes the balancer. The balancer is not required to call
+	// ClientConn.RemoveSubConn for its existing SubConns.
+	Close()
+}
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type ConnectivityStateEvaluator struct {
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// RecordTransition records state change happening in subConn and based on that
+// it evaluates what aggregated state should be.
+//
+//  - If at least one SubConn in Ready, the aggregated state is Ready;
+//  - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
+//  - Else the aggregated state is TransientFailure.
+//
+// Idle and Shutdown are not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			cse.numReady += updateVal
+		case connectivity.Connecting:
+			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		}
+	}
+
+	// Evaluate.
+	if cse.numReady > 0 {
+		return connectivity.Ready
+	}
+	if cse.numConnecting > 0 {
+		return connectivity.Connecting
+	}
+	return connectivity.TransientFailure
+}
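
This hunk deprecates the v1 Balancer callbacks in favour of the new V2Balancer interface and exports ConnectivityStateEvaluator, whose RecordTransition implements the Ready > Connecting > TransientFailure aggregation spelled out in its comment. A small sketch of how a balancer would drive it, assuming the vendored balancer and connectivity packages are importable as shown; the state sequence is made up:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two SubConns leave Idle and start connecting: the aggregate is Connecting.
	cse.RecordTransition(connectivity.Idle, connectivity.Connecting)
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting)) // CONNECTING

	// One of them becomes Ready: Ready dominates everything else.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready)) // READY

	// Both end up in TransientFailure: only now does the aggregate follow.
	cse.RecordTransition(connectivity.Ready, connectivity.TransientFailure)
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.TransientFailure)) // TRANSIENT_FAILURE
}
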
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 23d1351..c5a51bd 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -19,7 +19,8 @@
 package base
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
@@ -29,6 +30,7 @@
 type baseBuilder struct {
 	name          string
 	pickerBuilder PickerBuilder
+	config        Config
 }
 
 func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
@@ -38,11 +40,12 @@
 
 		subConns: make(map[resolver.Address]balancer.SubConn),
 		scStates: make(map[balancer.SubConn]connectivity.State),
-		csEvltr:  &connectivityStateEvaluator{},
+		csEvltr:  &balancer.ConnectivityStateEvaluator{},
 		// Initialize picker to a picker that always return
 		// ErrNoSubConnAvailable, because when state of a SubConn changes, we
 		// may call UpdateBalancerState with this picker.
 		picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
+		config: bb.config,
 	}
 }
 
@@ -54,27 +57,30 @@
 	cc            balancer.ClientConn
 	pickerBuilder PickerBuilder
 
-	csEvltr *connectivityStateEvaluator
+	csEvltr *balancer.ConnectivityStateEvaluator
 	state   connectivity.State
 
 	subConns map[resolver.Address]balancer.SubConn
 	scStates map[balancer.SubConn]connectivity.State
 	picker   balancer.Picker
+	config   Config
 }
 
 func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
-		return
-	}
-	grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
+	panic("not implemented")
+}
+
+func (b *baseBalancer) UpdateResolverState(s resolver.State) {
+	// TODO: handle s.Err (log if not nil) once implemented.
+	// TODO: handle s.ServiceConfig?
+	grpclog.Infoln("base.baseBalancer: got new resolver state: ", s)
 	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
 	addrsSet := make(map[resolver.Address]struct{})
-	for _, a := range addrs {
+	for _, a := range s.Addresses {
 		addrsSet[a] = struct{}{}
 		if _, ok := b.subConns[a]; !ok {
 			// a is a new address (not existing in b.subConns).
-			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
 			if err != nil {
 				grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
 				continue
@@ -116,6 +122,11 @@
 }
 
 func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	panic("not implemented")
+}
+
+func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	s := state.ConnectivityState
 	grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
 	oldS, ok := b.scStates[sc]
 	if !ok {
@@ -133,7 +144,7 @@
 	}
 
 	oldAggrState := b.state
-	b.state = b.csEvltr.recordTransition(oldS, s)
+	b.state = b.csEvltr.RecordTransition(oldS, s)
 
 	// Regenerate picker when one of the following happens:
 	//  - this sc became ready from not-ready
@@ -165,44 +176,3 @@
 func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
 	return nil, nil, p.err
 }
-
-// connectivityStateEvaluator gets updated by addrConns when their
-// states transition, based on which it evaluates the state of
-// ClientConn.
-type connectivityStateEvaluator struct {
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// recordTransition records state change happening in every subConn and based on
-// that it evaluates what aggregated state should be.
-// It can only transition between Ready, Connecting and TransientFailure. Other states,
-// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
-// before any subConn is created ClientConn is in idle state. In the end when ClientConn
-// closes it is in Shutdown state.
-//
-// recordTransition should only be called synchronously from the same goroutine.
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
-	// Update counters.
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			cse.numReady += updateVal
-		case connectivity.Connecting:
-			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
-		}
-	}
-
-	// Evaluate.
-	if cse.numReady > 0 {
-		return connectivity.Ready
-	}
-	if cse.numConnecting > 0 {
-		return connectivity.Connecting
-	}
-	return connectivity.TransientFailure
-}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
index 012ace2..34b1f29 100644
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -45,8 +45,20 @@
 // NewBalancerBuilder returns a balancer builder. The balancers
 // built by this builder will use the picker builder to build pickers.
 func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
+	return NewBalancerBuilderWithConfig(name, pb, Config{})
+}
+
+// Config contains the config info about the base balancer builder.
+type Config struct {
+	// HealthCheck indicates whether health checking should be enabled for this specific balancer.
+	HealthCheck bool
+}
+
+// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
+func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
 	return &baseBuilder{
 		name:          name,
 		pickerBuilder: pb,
+		config:        config,
 	}
 }
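
NewBalancerBuilderWithConfig lets a picker-based balancer opt into the new per-SubConn health checking through base.Config. A hedged sketch of how a custom balancer could use it at this gRPC version; the "first_ready" name and the picker logic are invented for illustration and are not part of this change:

package firstready

import (
	"context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// Name is the hypothetical policy name this balancer is registered under.
const Name = "first_ready"

type pickerBuilder struct{}

// Build receives only the READY SubConns and returns a picker that always
// uses the first one it sees in the map.
func (*pickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	for _, sc := range readySCs {
		return &picker{sc: sc}
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

type picker struct {
	sc balancer.SubConn
}

func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	return p.sc, nil, nil
}

func init() {
	// HealthCheck: true asks gRPC to run the health-checking protocol on the
	// SubConns created for this balancer, mirroring what round_robin now does.
	balancer.Register(base.NewBalancerBuilderWithConfig(Name, &pickerBuilder{}, base.Config{HealthCheck: true}))
}

A client would then select the policy by name, for example via the service config's loadBalancingPolicy field, the same way round_robin is selected.
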
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 2eda0a1..29f7a4d 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,12 +22,13 @@
 package roundrobin
 
 import (
+	"context"
 	"sync"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/base"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -36,7 +37,7 @@
 
 // newBuilder creates a new roundrobin balancer builder.
 func newBuilder() balancer.Builder {
-	return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
+	return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
 }
 
 func init() {
@@ -47,12 +48,19 @@
 
 func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
 	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
+	if len(readySCs) == 0 {
+		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+	}
 	var scs []balancer.SubConn
 	for _, sc := range readySCs {
 		scs = append(scs, sc)
 	}
 	return &rrPicker{
 		subConns: scs,
+		// Start at a random index, as the same RR balancer rebuilds a new
+		// picker when SubConn states change, and we don't want to apply excess
+		// load to the first server in the list.
+		next: grpcrand.Intn(len(scs)),
 	}
 }
 
@@ -67,10 +75,6 @@
 }
 
 func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	if len(p.subConns) <= 0 {
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-
 	p.mu.Lock()
 	sc := p.subConns[p.next]
 	p.next = (p.next + 1) % len(p.subConns)
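
Two behavioural changes in this file: an empty ready set now yields an error picker up front instead of being checked on every Pick, and the rotation starts at a random index so freshly rebuilt pickers do not all hammer the first backend. A standalone sketch of that rotation, using plain math/rand instead of the internal grpcrand package and made-up addresses:

package main

import (
	"fmt"
	"math/rand"
)

// rotator mimics the rrPicker above: start at a random offset, then rotate.
type rotator struct {
	addrs []string
	next  int
}

func newRotator(addrs []string) *rotator {
	return &rotator{addrs: addrs, next: rand.Intn(len(addrs))}
}

func (r *rotator) Pick() string {
	a := r.addrs[r.next]
	r.next = (r.next + 1) % len(r.addrs)
	return a
}

func main() {
	addrs := []string{"10.0.0.1:50051", "10.0.0.2:50051", "10.0.0.3:50051"}
	// The real balancer rebuilds its picker whenever SubConn states change;
	// a random starting index keeps those rebuilds from always sending the
	// first RPC to addrs[0].
	for i := 0; i < 3; i++ {
		p := newRotator(addrs)
		fmt.Println("picker", i, "first pick:", p.Pick())
	}
}
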
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index c23f817..bc965f0 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -82,20 +82,13 @@
 	return b.c
 }
 
-// resolverUpdate contains the new resolved addresses or error if there's
-// any.
-type resolverUpdate struct {
-	addrs []resolver.Address
-	err   error
-}
-
 // ccBalancerWrapper is a wrapper on top of cc for balancers.
 // It implements balancer.ClientConn interface.
 type ccBalancerWrapper struct {
 	cc               *ClientConn
 	balancer         balancer.Balancer
 	stateChangeQueue *scStateUpdateBuffer
-	resolverUpdateCh chan *resolverUpdate
+	resolverUpdateCh chan *resolver.State
 	done             chan struct{}
 
 	mu       sync.Mutex
@@ -106,7 +99,7 @@
 	ccb := &ccBalancerWrapper{
 		cc:               cc,
 		stateChangeQueue: newSCStateUpdateBuffer(),
-		resolverUpdateCh: make(chan *resolverUpdate, 1),
+		resolverUpdateCh: make(chan *resolver.State, 1),
 		done:             make(chan struct{}),
 		subConns:         make(map[*acBalancerWrapper]struct{}),
 	}
@@ -128,15 +121,23 @@
 				return
 			default:
 			}
-			ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
-		case t := <-ccb.resolverUpdateCh:
+			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+				ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
+			} else {
+				ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
+			}
+		case s := <-ccb.resolverUpdateCh:
 			select {
 			case <-ccb.done:
 				ccb.balancer.Close()
 				return
 			default:
 			}
-			ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
+			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+				ub.UpdateResolverState(*s)
+			} else {
+				ccb.balancer.HandleResolvedAddrs(s.Addresses, nil)
+			}
 		case <-ccb.done:
 		}
 
@@ -177,15 +178,23 @@
 	})
 }
 
-func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
+func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) {
+	if ccb.cc.curBalancerName != grpclbName {
+		// Filter any grpclb addresses since we don't have the grpclb balancer.
+		for i := 0; i < len(s.Addresses); {
+			if s.Addresses[i].Type == resolver.GRPCLB {
+				copy(s.Addresses[i:], s.Addresses[i+1:])
+				s.Addresses = s.Addresses[:len(s.Addresses)-1]
+				continue
+			}
+			i++
+		}
+	}
 	select {
 	case <-ccb.resolverUpdateCh:
 	default:
 	}
-	ccb.resolverUpdateCh <- &resolverUpdate{
-		addrs: addrs,
-		err:   err,
-	}
+	ccb.resolverUpdateCh <- &s
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@@ -197,7 +206,7 @@
 	if ccb.subConns == nil {
 		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
 	}
-	ac, err := ccb.cc.newAddrConn(addrs)
+	ac, err := ccb.cc.newAddrConn(addrs, opts)
 	if err != nil {
 		return nil, err
 	}
@@ -229,8 +238,13 @@
 	if ccb.subConns == nil {
 		return
 	}
-	ccb.cc.csMgr.updateState(s)
+	// Update picker before updating state.  Even though the ordering here does
+	// not matter, it can lead to multiple calls of Pick in the common start-up
+	// case where we wait for ready and then perform an RPC.  If the picker is
+	// updated later, we could call the "connecting" picker when the state is
+	// updated, and then call the "ready" picker after the picker gets updated.
 	ccb.cc.blockingpicker.updatePicker(p)
+	ccb.cc.csMgr.updateState(s)
 }
 
 func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
@@ -257,6 +271,7 @@
 	}
 	if !acbw.ac.tryUpdateAddrs(addrs) {
 		cc := acbw.ac.cc
+		opts := acbw.ac.scopts
 		acbw.ac.mu.Lock()
 		// Set old ac.acbw to nil so the Shutdown state update will be ignored
 		// by balancer.
@@ -272,7 +287,7 @@
 			return
 		}
 
-		ac, err := cc.newAddrConn(addrs)
+		ac, err := cc.newAddrConn(addrs, opts)
 		if err != nil {
 			grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
 			return
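
updateResolverState replaces handleResolvedAddrs and, when the grpclb balancer is not active, strips resolver.GRPCLB addresses in place before forwarding the state. The same in-place filtering idiom, pulled out as a self-contained sketch; filterGRPCLB and the addresses are illustrative:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// filterGRPCLB mirrors the loop in updateResolverState above: it removes
// resolver.GRPCLB entries in place without allocating a second slice.
func filterGRPCLB(addrs []resolver.Address) []resolver.Address {
	for i := 0; i < len(addrs); {
		if addrs[i].Type == resolver.GRPCLB {
			copy(addrs[i:], addrs[i+1:])
			addrs = addrs[:len(addrs)-1]
			continue
		}
		i++
	}
	return addrs
}

func main() {
	addrs := []resolver.Address{
		{Addr: "10.0.0.1:50051", Type: resolver.Backend},
		{Addr: "10.0.0.9:50051", Type: resolver.GRPCLB},
		{Addr: "10.0.0.2:50051", Type: resolver.Backend},
	}
	for _, a := range filterGRPCLB(addrs) {
		fmt.Println(a.Addr)
	}
}
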
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
index b7abc6b..29bda63 100644
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
@@ -19,16 +19,14 @@
 package grpc
 
 import (
+	"context"
 	"strings"
 	"sync"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/resolver"
-	"google.golang.org/grpc/status"
 )
 
 type balancerWrapperBuilder struct {
@@ -55,7 +53,7 @@
 		startCh:    make(chan struct{}),
 		conns:      make(map[resolver.Address]balancer.SubConn),
 		connSt:     make(map[balancer.SubConn]*scState),
-		csEvltr:    &connectivityStateEvaluator{},
+		csEvltr:    &balancer.ConnectivityStateEvaluator{},
 		state:      connectivity.Idle,
 	}
 	cc.UpdateBalancerState(connectivity.Idle, bw)
@@ -80,10 +78,6 @@
 	cc         balancer.ClientConn
 	targetAddr string // Target without the scheme.
 
-	// To aggregate the connectivity state.
-	csEvltr *connectivityStateEvaluator
-	state   connectivity.State
-
 	mu     sync.Mutex
 	conns  map[resolver.Address]balancer.SubConn
 	connSt map[balancer.SubConn]*scState
@@ -92,6 +86,10 @@
 	// - NewSubConn is created, cc wants to notify balancer of state changes;
 	// - Build hasn't return, cc doesn't have access to balancer.
 	startCh chan struct{}
+
+	// To aggregate the connectivity state.
+	csEvltr *balancer.ConnectivityStateEvaluator
+	state   connectivity.State
 }
 
 // lbWatcher watches the Notify channel of the balancer and manages
@@ -248,7 +246,7 @@
 			scSt.down(errConnClosing)
 		}
 	}
-	sa := bw.csEvltr.recordTransition(oldS, s)
+	sa := bw.csEvltr.RecordTransition(oldS, s)
 	if bw.state != sa {
 		bw.state = sa
 	}
@@ -283,9 +281,8 @@
 }
 
 // The picker is the balancerWrapper itself.
-// Pick should never return ErrNoSubConnAvailable.
 // It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
 	failfast := true // Default failfast is true.
 	if ss, ok := rpcInfoFromContext(ctx); ok {
 		failfast = ss.failfast
@@ -294,79 +291,51 @@
 	if err != nil {
 		return nil, nil, err
 	}
-	var done func(balancer.DoneInfo)
 	if p != nil {
-		done = func(i balancer.DoneInfo) { p() }
+		done = func(balancer.DoneInfo) { p() }
+		defer func() {
+			if err != nil {
+				p()
+			}
+		}()
 	}
-	var sc balancer.SubConn
+
 	bw.mu.Lock()
 	defer bw.mu.Unlock()
 	if bw.pickfirst {
 		// Get the first sc in conns.
-		for _, sc = range bw.conns {
-			break
+		for _, sc := range bw.conns {
+			return sc, done, nil
 		}
-	} else {
-		var ok bool
-		sc, ok = bw.conns[resolver.Address{
-			Addr:       a.Addr,
-			Type:       resolver.Backend,
-			ServerName: "",
-			Metadata:   a.Metadata,
-		}]
-		if !ok && failfast {
-			return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
-		}
-		if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
-			// If the returned sc is not ready and RPC is failfast,
-			// return error, and this RPC will fail.
-			return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
-		}
+		return nil, nil, balancer.ErrNoSubConnAvailable
 	}
-
-	return sc, done, nil
-}
-
-// connectivityStateEvaluator gets updated by addrConns when their
-// states transition, based on which it evaluates the state of
-// ClientConn.
-type connectivityStateEvaluator struct {
-	mu                  sync.Mutex
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// recordTransition records state change happening in every subConn and based on
-// that it evaluates what aggregated state should be.
-// It can only transition between Ready, Connecting and TransientFailure. Other states,
-// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
-// before any subConn is created ClientConn is in idle state. In the end when ClientConn
-// closes it is in Shutdown state.
-// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state.
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
-	cse.mu.Lock()
-	defer cse.mu.Unlock()
-
-	// Update counters.
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			cse.numReady += updateVal
-		case connectivity.Connecting:
-			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
-		}
+	sc, ok1 := bw.conns[resolver.Address{
+		Addr:       a.Addr,
+		Type:       resolver.Backend,
+		ServerName: "",
+		Metadata:   a.Metadata,
+	}]
+	s, ok2 := bw.connSt[sc]
+	if !ok1 || !ok2 {
+		// This can only happen due to a race where Get() returned an address
+		// that was subsequently removed by Notify.  In this case we should
+		// retry always.
+		return nil, nil, balancer.ErrNoSubConnAvailable
 	}
-
-	// Evaluate.
-	if cse.numReady > 0 {
-		return connectivity.Ready
+	switch s.s {
+	case connectivity.Ready, connectivity.Idle:
+		return sc, done, nil
+	case connectivity.Shutdown, connectivity.TransientFailure:
+		// If the returned sc has been shut down or is in transient failure,
+		// return error, and this RPC will fail or wait for another picker (if
+		// non-failfast).
+		return nil, nil, balancer.ErrTransientFailure
+	default:
+		// For other states (connecting or unknown), the v1 balancer would
+		// traditionally wait until ready and then issue the RPC.  Returning
+		// ErrNoSubConnAvailable will be a slight improvement in that it will
+		// allow the balancer to choose another address in case others are
+		// connected.
+		return nil, nil, balancer.ErrNoSubConnAvailable
 	}
-	if cse.numConnecting > 0 {
-		return connectivity.Connecting
-	}
-	return connectivity.TransientFailure
 }
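
The rewritten Pick maps the chosen SubConn's connectivity state onto the balancer sentinel errors instead of returning codes.Unavailable directly: Ready and Idle are usable, Shutdown and TransientFailure surface ErrTransientFailure, and everything else returns ErrNoSubConnAvailable so gRPC waits for the next picker. A compact sketch of that mapping; pickErr is an invented helper, not part of the wrapper:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

// pickErr reproduces the state handling Pick now uses.
func pickErr(s connectivity.State) error {
	switch s {
	case connectivity.Ready, connectivity.Idle:
		return nil
	case connectivity.Shutdown, connectivity.TransientFailure:
		return balancer.ErrTransientFailure
	default:
		return balancer.ErrNoSubConnAvailable
	}
}

func main() {
	for _, s := range []connectivity.State{
		connectivity.Ready,
		connectivity.Connecting,
		connectivity.TransientFailure,
	} {
		err := pickErr(s)
		fmt.Printf("%v: usable=%v err=%v\n", s, err == nil, err)
	}
}
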
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
new file mode 100644
index 0000000..f393bb6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -0,0 +1,900 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
+
+package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import duration "github.com/golang/protobuf/ptypes/duration"
+import timestamp "github.com/golang/protobuf/ptypes/timestamp"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Enumerates the type of event
+// Note the terminology is different from the RPC semantics
+// definition, but the same meaning is expressed here.
+type GrpcLogEntry_EventType int32
+
+const (
+	GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
+	// Header sent from client to server
+	GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
+	// Header sent from server to client
+	GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
+	// Message sent from client to server
+	GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
+	// Message sent from server to client
+	GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
+	// A signal that client is done sending
+	GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
+	// Trailer indicates the end of the RPC.
+	// On client side, this event means a trailer was either received
+	// from the network or the gRPC library locally generated a status
+	// to inform the application about a failure.
+	// On server side, this event means the server application requested
+	// to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
+	// this due to races on server side.
+	GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
+	// A signal that the RPC is cancelled. On client side, this
+	// indicates the client application requests a cancellation.
+	// On server side, this indicates that cancellation was detected.
+	// Note: This marks the end of the RPC. Events may arrive after
+	// this due to races. For example, on client side a trailer
+	// may arrive even though the application requested to cancel the RPC.
+	GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
+)
+
+var GrpcLogEntry_EventType_name = map[int32]string{
+	0: "EVENT_TYPE_UNKNOWN",
+	1: "EVENT_TYPE_CLIENT_HEADER",
+	2: "EVENT_TYPE_SERVER_HEADER",
+	3: "EVENT_TYPE_CLIENT_MESSAGE",
+	4: "EVENT_TYPE_SERVER_MESSAGE",
+	5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
+	6: "EVENT_TYPE_SERVER_TRAILER",
+	7: "EVENT_TYPE_CANCEL",
+}
+var GrpcLogEntry_EventType_value = map[string]int32{
+	"EVENT_TYPE_UNKNOWN":           0,
+	"EVENT_TYPE_CLIENT_HEADER":     1,
+	"EVENT_TYPE_SERVER_HEADER":     2,
+	"EVENT_TYPE_CLIENT_MESSAGE":    3,
+	"EVENT_TYPE_SERVER_MESSAGE":    4,
+	"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
+	"EVENT_TYPE_SERVER_TRAILER":    6,
+	"EVENT_TYPE_CANCEL":            7,
+}
+
+func (x GrpcLogEntry_EventType) String() string {
+	return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
+}
+func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
+}
+
+// Enumerates the entity that generates the log entry
+type GrpcLogEntry_Logger int32
+
+const (
+	GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
+	GrpcLogEntry_LOGGER_CLIENT  GrpcLogEntry_Logger = 1
+	GrpcLogEntry_LOGGER_SERVER  GrpcLogEntry_Logger = 2
+)
+
+var GrpcLogEntry_Logger_name = map[int32]string{
+	0: "LOGGER_UNKNOWN",
+	1: "LOGGER_CLIENT",
+	2: "LOGGER_SERVER",
+}
+var GrpcLogEntry_Logger_value = map[string]int32{
+	"LOGGER_UNKNOWN": 0,
+	"LOGGER_CLIENT":  1,
+	"LOGGER_SERVER":  2,
+}
+
+func (x GrpcLogEntry_Logger) String() string {
+	return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
+}
+func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
+}
+
+type Address_Type int32
+
+const (
+	Address_TYPE_UNKNOWN Address_Type = 0
+	// address is in 1.2.3.4 form
+	Address_TYPE_IPV4 Address_Type = 1
+	// address is in IPv6 canonical form (RFC5952 section 4)
+	// The scope is NOT included in the address string.
+	Address_TYPE_IPV6 Address_Type = 2
+	// address is UDS string
+	Address_TYPE_UNIX Address_Type = 3
+)
+
+var Address_Type_name = map[int32]string{
+	0: "TYPE_UNKNOWN",
+	1: "TYPE_IPV4",
+	2: "TYPE_IPV6",
+	3: "TYPE_UNIX",
+}
+var Address_Type_value = map[string]int32{
+	"TYPE_UNKNOWN": 0,
+	"TYPE_IPV4":    1,
+	"TYPE_IPV6":    2,
+	"TYPE_UNIX":    3,
+}
+
+func (x Address_Type) String() string {
+	return proto.EnumName(Address_Type_name, int32(x))
+}
+func (Address_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
+}
+
+// Log entry we store in binary logs
+type GrpcLogEntry struct {
+	// The timestamp of the binary log message
+	Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// Uniquely identifies a call. The value must not be 0 in order to disambiguate
+	// from an unset value.
+	// Each call may have several log entries, they will all have the same call_id.
+	// Nothing is guaranteed about their value other than they are unique across
+	// different RPCs in the same gRPC process.
+	CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
+	// The entry sequence id for this call. The first GrpcLogEntry has a
+	// value of 1, to disambiguate from an unset value. The purpose of
+	// this field is to detect missing entries in environments where
+	// durability or ordering is not guaranteed.
+	SequenceIdWithinCall uint64                 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
+	Type                 GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
+	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
+	// The logger uses one of the following fields to record the payload,
+	// according to the type of the log entry.
+	//
+	// Types that are valid to be assigned to Payload:
+	//	*GrpcLogEntry_ClientHeader
+	//	*GrpcLogEntry_ServerHeader
+	//	*GrpcLogEntry_Message
+	//	*GrpcLogEntry_Trailer
+	Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
+	// true if payload does not represent the full message or metadata.
+	PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
+	// Peer address information, will only be recorded on the first
+	// incoming event. On client side, peer is logged on
+	// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
+	// the case of trailers-only. On server side, peer is always
+	// logged on EVENT_TYPE_CLIENT_HEADER.
+	Peer                 *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GrpcLogEntry) Reset()         { *m = GrpcLogEntry{} }
+func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
+func (*GrpcLogEntry) ProtoMessage()    {}
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
+}
+func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
+}
+func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
+}
+func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
+}
+func (m *GrpcLogEntry) XXX_Size() int {
+	return xxx_messageInfo_GrpcLogEntry.Size(m)
+}
+func (m *GrpcLogEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
+
+func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
+	if m != nil {
+		return m.Timestamp
+	}
+	return nil
+}
+
+func (m *GrpcLogEntry) GetCallId() uint64 {
+	if m != nil {
+		return m.CallId
+	}
+	return 0
+}
+
+func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if m != nil {
+		return m.SequenceIdWithinCall
+	}
+	return 0
+}
+
+func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if m != nil {
+		return m.Type
+	}
+	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if m != nil {
+		return m.Logger
+	}
+	return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+type isGrpcLogEntry_Payload interface {
+	isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+		return x.ClientHeader
+	}
+	return nil
+}
+
+func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+		return x.ServerHeader
+	}
+	return nil
+}
+
+func (m *GrpcLogEntry) GetMessage() *Message {
+	if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
+		return x.Message
+	}
+	return nil
+}
+
+func (m *GrpcLogEntry) GetTrailer() *Trailer {
+	if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
+		return x.Trailer
+	}
+	return nil
+}
+
+func (m *GrpcLogEntry) GetPayloadTruncated() bool {
+	if m != nil {
+		return m.PayloadTruncated
+	}
+	return false
+}
+
+func (m *GrpcLogEntry) GetPeer() *Address {
+	if m != nil {
+		return m.Peer
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
+		(*GrpcLogEntry_ClientHeader)(nil),
+		(*GrpcLogEntry_ServerHeader)(nil),
+		(*GrpcLogEntry_Message)(nil),
+		(*GrpcLogEntry_Trailer)(nil),
+	}
+}
+
+func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*GrpcLogEntry)
+	// payload
+	switch x := m.Payload.(type) {
+	case *GrpcLogEntry_ClientHeader:
+		b.EncodeVarint(6<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ClientHeader); err != nil {
+			return err
+		}
+	case *GrpcLogEntry_ServerHeader:
+		b.EncodeVarint(7<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ServerHeader); err != nil {
+			return err
+		}
+	case *GrpcLogEntry_Message:
+		b.EncodeVarint(8<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Message); err != nil {
+			return err
+		}
+	case *GrpcLogEntry_Trailer:
+		b.EncodeVarint(9<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Trailer); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*GrpcLogEntry)
+	switch tag {
+	case 6: // payload.client_header
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(ClientHeader)
+		err := b.DecodeMessage(msg)
+		m.Payload = &GrpcLogEntry_ClientHeader{msg}
+		return true, err
+	case 7: // payload.server_header
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(ServerHeader)
+		err := b.DecodeMessage(msg)
+		m.Payload = &GrpcLogEntry_ServerHeader{msg}
+		return true, err
+	case 8: // payload.message
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Message)
+		err := b.DecodeMessage(msg)
+		m.Payload = &GrpcLogEntry_Message{msg}
+		return true, err
+	case 9: // payload.trailer
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Trailer)
+		err := b.DecodeMessage(msg)
+		m.Payload = &GrpcLogEntry_Trailer{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*GrpcLogEntry)
+	// payload
+	switch x := m.Payload.(type) {
+	case *GrpcLogEntry_ClientHeader:
+		s := proto.Size(x.ClientHeader)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *GrpcLogEntry_ServerHeader:
+		s := proto.Size(x.ServerHeader)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *GrpcLogEntry_Message:
+		s := proto.Size(x.Message)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *GrpcLogEntry_Trailer:
+		s := proto.Size(x.Trailer)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
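
The generated oneof plumbing above is easiest to read next to a usage example: the Payload field is set by wrapping the concrete message in its GrpcLogEntry_* wrapper struct, and read back through the generated getters, which return nil when a different variant is set. A hedged sketch; the method name and authority are placeholders:

package main

import (
	"fmt"

	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	// Setting the oneof: wrap the ClientHeader in its generated wrapper.
	entry := &pb.GrpcLogEntry{
		Type:   pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger: pb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &pb.GrpcLogEntry_ClientHeader{
			ClientHeader: &pb.ClientHeader{
				MethodName: "/grpc.health.v1.Health/Check",
				Authority:  "example.com:443",
			},
		},
	}

	// Reading it back: the getter returns nil unless this variant is set.
	if h := entry.GetClientHeader(); h != nil {
		fmt.Println(entry.GetType(), h.GetMethodName())
	}
}
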
+
+type ClientHeader struct {
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The name of the RPC method, which looks something like:
+	// /<service>/<method>
+	// Note the leading "/" character.
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+	// A single process may be used to run multiple virtual
+	// servers with different identities.
+	// The authority is the name of such a server identity.
+	// It is typically a portion of the URI in the form of
+	// <host> or <host>:<port> .
+	Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+	// the RPC timeout
+	Timeout              *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *ClientHeader) Reset()         { *m = ClientHeader{} }
+func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
+func (*ClientHeader) ProtoMessage()    {}
+func (*ClientHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
+}
+func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
+}
+func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
+}
+func (dst *ClientHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClientHeader.Merge(dst, src)
+}
+func (m *ClientHeader) XXX_Size() int {
+	return xxx_messageInfo_ClientHeader.Size(m)
+}
+func (m *ClientHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClientHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
+
+func (m *ClientHeader) GetMetadata() *Metadata {
+	if m != nil {
+		return m.Metadata
+	}
+	return nil
+}
+
+func (m *ClientHeader) GetMethodName() string {
+	if m != nil {
+		return m.MethodName
+	}
+	return ""
+}
+
+func (m *ClientHeader) GetAuthority() string {
+	if m != nil {
+		return m.Authority
+	}
+	return ""
+}
+
+func (m *ClientHeader) GetTimeout() *duration.Duration {
+	if m != nil {
+		return m.Timeout
+	}
+	return nil
+}
+
+type ServerHeader struct {
+	// This contains only the metadata from the application.
+	Metadata             *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *ServerHeader) Reset()         { *m = ServerHeader{} }
+func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
+func (*ServerHeader) ProtoMessage()    {}
+func (*ServerHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
+}
+func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
+}
+func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
+}
+func (dst *ServerHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerHeader.Merge(dst, src)
+}
+func (m *ServerHeader) XXX_Size() int {
+	return xxx_messageInfo_ServerHeader.Size(m)
+}
+func (m *ServerHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServerHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
+
+func (m *ServerHeader) GetMetadata() *Metadata {
+	if m != nil {
+		return m.Metadata
+	}
+	return nil
+}
+
+type Trailer struct {
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The gRPC status code.
+	StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
+	// An original status message before any transport specific
+	// encoding.
+	StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
+	// The value of the 'grpc-status-details-bin' metadata key. If
+	// present, this is always an encoded 'google.rpc.Status' message.
+	StatusDetails        []byte   `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Trailer) Reset()         { *m = Trailer{} }
+func (m *Trailer) String() string { return proto.CompactTextString(m) }
+func (*Trailer) ProtoMessage()    {}
+func (*Trailer) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
+}
+func (m *Trailer) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Trailer.Unmarshal(m, b)
+}
+func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
+}
+func (dst *Trailer) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Trailer.Merge(dst, src)
+}
+func (m *Trailer) XXX_Size() int {
+	return xxx_messageInfo_Trailer.Size(m)
+}
+func (m *Trailer) XXX_DiscardUnknown() {
+	xxx_messageInfo_Trailer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Trailer proto.InternalMessageInfo
+
+func (m *Trailer) GetMetadata() *Metadata {
+	if m != nil {
+		return m.Metadata
+	}
+	return nil
+}
+
+func (m *Trailer) GetStatusCode() uint32 {
+	if m != nil {
+		return m.StatusCode
+	}
+	return 0
+}
+
+func (m *Trailer) GetStatusMessage() string {
+	if m != nil {
+		return m.StatusMessage
+	}
+	return ""
+}
+
+func (m *Trailer) GetStatusDetails() []byte {
+	if m != nil {
+		return m.StatusDetails
+	}
+	return nil
+}
+
+// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
+type Message struct {
+	// Length of the message. It may not be the same as the length of the
+	// data field, as the logging payload can be truncated or omitted.
+	Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
+	// May be truncated or omitted.
+	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Message) Reset()         { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage()    {}
+func (*Message) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
+}
+func (m *Message) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Message.Unmarshal(m, b)
+}
+func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Message.Marshal(b, m, deterministic)
+}
+func (dst *Message) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Message.Merge(dst, src)
+}
+func (m *Message) XXX_Size() int {
+	return xxx_messageInfo_Message.Size(m)
+}
+func (m *Message) XXX_DiscardUnknown() {
+	xxx_messageInfo_Message.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Message proto.InternalMessageInfo
+
+func (m *Message) GetLength() uint32 {
+	if m != nil {
+		return m.Length
+	}
+	return 0
+}
+
+func (m *Message) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// A list of metadata pairs, used in the payload of client header,
+// server header, and server trailer.
+// Implementations may omit some entries to honor the header limits
+// of GRPC_BINARY_LOG_CONFIG.
+//
+// Header keys added by gRPC are omitted. To be more specific,
+// implementations will not log the following entries, and this is
+// not to be treated as a truncation:
+// - entries handled by grpc that are not user visible, such as those
+//   that begin with 'grpc-' (with exception of grpc-trace-bin)
+//   or keys like 'lb-token'
+// - transport specific entries, including but not limited to:
+//   ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
+// - entries added for call credentials
+//
+// Implementations must always log grpc-trace-bin if it is present.
+// Practically speaking it will only be visible on server side because
+// grpc-trace-bin is managed by low level client side mechanisms
+// inaccessible from the application level. On server side, the
+// header is just a normal metadata key.
+// The pair will not count towards the size limit.
+type Metadata struct {
+	Entry                []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *Metadata) Reset()         { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage()    {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Metadata.Unmarshal(m, b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+}
+func (dst *Metadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metadata.Merge(dst, src)
+}
+func (m *Metadata) XXX_Size() int {
+	return xxx_messageInfo_Metadata.Size(m)
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func (m *Metadata) GetEntry() []*MetadataEntry {
+	if m != nil {
+		return m.Entry
+	}
+	return nil
+}
+
+// A metadata key value pair
+type MetadataEntry struct {
+	Key                  string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MetadataEntry) Reset()         { *m = MetadataEntry{} }
+func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
+func (*MetadataEntry) ProtoMessage()    {}
+func (*MetadataEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
+}
+func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
+}
+func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
+}
+func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetadataEntry.Merge(dst, src)
+}
+func (m *MetadataEntry) XXX_Size() int {
+	return xxx_messageInfo_MetadataEntry.Size(m)
+}
+func (m *MetadataEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
+
+func (m *MetadataEntry) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *MetadataEntry) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Address information
+type Address struct {
+	Type    Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
+	Address string       `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+	// only for TYPE_IPV4 and TYPE_IPV6
+	IpPort               uint32   `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Address) Reset()         { *m = Address{} }
+func (m *Address) String() string { return proto.CompactTextString(m) }
+func (*Address) ProtoMessage()    {}
+func (*Address) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
+}
+func (m *Address) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Address.Unmarshal(m, b)
+}
+func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Address.Marshal(b, m, deterministic)
+}
+func (dst *Address) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Address.Merge(dst, src)
+}
+func (m *Address) XXX_Size() int {
+	return xxx_messageInfo_Address.Size(m)
+}
+func (m *Address) XXX_DiscardUnknown() {
+	xxx_messageInfo_Address.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Address proto.InternalMessageInfo
+
+func (m *Address) GetType() Address_Type {
+	if m != nil {
+		return m.Type
+	}
+	return Address_TYPE_UNKNOWN
+}
+
+func (m *Address) GetAddress() string {
+	if m != nil {
+		return m.Address
+	}
+	return ""
+}
+
+func (m *Address) GetIpPort() uint32 {
+	if m != nil {
+		return m.IpPort
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
+	proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
+	proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
+	proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
+	proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
+	proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
+	proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
+	proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
+	proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
+	proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
+	proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
+}
+
+func init() {
+	proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
+}
+
+var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
+	// 900 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
+	0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
+	0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
+	0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
+	0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
+	0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
+	0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
+	0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
+	0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
+	0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
+	0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
+	0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
+	0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
+	0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
+	0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
+	0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
+	0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
+	0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
+	0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
+	0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
+	0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
+	0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
+	0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
+	0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
+	0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
+	0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
+	0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
+	0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
+	0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
+	0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
+	0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
+	0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
+	0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
+	0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
+	0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
+	0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
+	0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
+	0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
+	0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
+	0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
+	0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
+	0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
+	0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
+	0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
+	0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
+	0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
+	0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
+	0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
+	0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
+	0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
+	0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
+	0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
+	0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
+	0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
+	0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
+	0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
+	0xd4, 0x07, 0x00, 0x00,
+}
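
The generated accessors above (GetType, GetAddress, GetIpPort) all follow protoc-gen-go's nil-receiver convention: a getter called on a nil message returns the zero value instead of panicking, so callers can read fields without guarding every step. A minimal, self-contained sketch of that pattern, using a hypothetical Example type rather than the real grpc_binarylog_v1 package:

package main

import "fmt"

// Example is a hypothetical stand-in for a generated message such as Address
// above; it is not a real gRPC type.
type Example struct {
	Address string
	IpPort  uint32
}

// GetAddress follows the nil-receiver convention used by protoc-gen-go:
// calling a getter on a nil message yields the zero value instead of panicking.
func (m *Example) GetAddress() string {
	if m != nil {
		return m.Address
	}
	return ""
}

func (m *Example) GetIpPort() uint32 {
	if m != nil {
		return m.IpPort
	}
	return 0
}

func main() {
	var m *Example // nil message, e.g. an unset optional field
	// Safe to call even though m is nil; prints the zero values.
	fmt.Println(m.GetAddress(), m.GetIpPort())
}
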
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index f73b7d5..9e20e4d 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -19,7 +19,7 @@
 package grpc
 
 import (
-	"golang.org/x/net/context"
+	"context"
 )
 
 // Invoke sends the RPC request on the wire and returns after response is
@@ -40,7 +40,7 @@
 func combine(o1 []CallOption, o2 []CallOption) []CallOption {
 	// we don't use append because o1 could have extra capacity whose
 	// elements would be overwritten, which could cause inadvertent
-	// sharing (and race connditions) between concurrent calls
+	// sharing (and race conditions) between concurrent calls
 	if len(o1) == 0 {
 		return o2
 	} else if len(o2) == 0 {
@@ -63,31 +63,12 @@
 var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
 
 func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
-	// TODO: implement retries in clientStream and make this simply
-	// newClientStream, SendMsg, RecvMsg.
-	firstAttempt := true
-	for {
-		csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
-		if err != nil {
-			return err
-		}
-		cs := csInt.(*clientStream)
-		if err := cs.SendMsg(req); err != nil {
-			if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
-				// TODO: Add a field to header for grpc-transparent-retry-attempts
-				firstAttempt = false
-				continue
-			}
-			return err
-		}
-		if err := cs.RecvMsg(reply); err != nil {
-			if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
-				// TODO: Add a field to header for grpc-transparent-retry-attempts
-				firstAttempt = false
-				continue
-			}
-			return err
-		}
-		return nil
+	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
+	if err != nil {
+		return err
 	}
+	if err := cs.SendMsg(req); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
 }
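
The corrected comment in combine above describes a real slice-aliasing hazard: appending to a slice that still has spare capacity reuses its backing array, so two concurrent calls appending their own CallOptions to the same base slice could overwrite each other. A minimal, standalone sketch of the hazard and of the copy-based fix (plain strings stand in for CallOption; none of this is gRPC code):

package main

import "fmt"

func main() {
	// base has length 1 but capacity 2, so it has spare room.
	base := make([]string, 1, 2)
	base[0] = "wait-for-ready"

	// Two "calls" each append their own extra option to the same base slice.
	a := append(base, "call-A-option")
	b := append(base, "call-B-option")

	// Both appends reused base's backing array, so the second overwrote the first.
	fmt.Println(a[1], b[1]) // prints: call-B-option call-B-option

	// combine() avoids this by allocating a fresh slice and copying both inputs.
	merged := make([]string, len(base)+1)
	copy(merged, base)
	merged[len(base)] = "call-A-option"
	fmt.Println(merged[1]) // prints: call-A-option; a fresh backing array cannot be clobbered
}

The simplified invoke above is the other half of this file's change: the old TODO ("implement retries in clientStream and make this simply newClientStream, SendMsg, RecvMsg") has been carried out upstream, so the unary path is now exactly those three steps.
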
diff --git a/vendor/google.golang.org/grpc/channelz/types.go b/vendor/google.golang.org/grpc/channelz/types.go
deleted file mode 100644
index 153d753..0000000
--- a/vendor/google.golang.org/grpc/channelz/types.go
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
-	"net"
-	"time"
-
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
-)
-
-// entry represents a node in the channelz database.
-type entry interface {
-	// addChild adds a child e, whose channelz id is id to child list
-	addChild(id int64, e entry)
-	// deleteChild deletes a child with channelz id to be id from child list
-	deleteChild(id int64)
-	// triggerDelete tries to delete self from channelz database. However, if child
-	// list is not empty, then deletion from the database is on hold until the last
-	// child is deleted from database.
-	triggerDelete()
-	// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
-	// list is now empty. If both conditions are met, then delete self from database.
-	deleteSelfIfReady()
-}
-
-// dummyEntry is a fake entry to handle entry not found case.
-type dummyEntry struct {
-	idNotFound int64
-}
-
-func (d *dummyEntry) addChild(id int64, e entry) {
-	// Note: It is possible for a normal program to reach here under race condition.
-	// For example, there could be a race between ClientConn.Close() info being propagated
-	// to addrConn and http2Client. ClientConn.Close() cancel the context and result
-	// in http2Client to error. The error info is then caught by transport monitor
-	// and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
-	// the addrConn will create a new transport. And when registering the new transport in
-	// channelz, its parent addrConn could have already been torn down and deleted
-	// from channelz tracking, and thus reach the code here.
-	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
-}
-
-func (d *dummyEntry) deleteChild(id int64) {
-	// It is possible for a normal program to reach here under race condition.
-	// Refer to the example described in addChild().
-	grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
-}
-
-func (d *dummyEntry) triggerDelete() {
-	grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
-}
-
-func (*dummyEntry) deleteSelfIfReady() {
-	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
-}
-
-// ChannelMetric defines the info channelz provides for a specific Channel, which
-// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ChannelMetric struct {
-	// ID is the channelz id of this channel.
-	ID int64
-	// RefName is the human readable reference string of this channel.
-	RefName string
-	// ChannelData contains channel internal metric reported by the channel through
-	// ChannelzMetric().
-	ChannelData *ChannelInternalMetric
-	// NestedChans tracks the nested channel type children of this channel in the format of
-	// a map from nested channel channelz id to corresponding reference string.
-	NestedChans map[int64]string
-	// SubChans tracks the subchannel type children of this channel in the format of a
-	// map from subchannel channelz id to corresponding reference string.
-	SubChans map[int64]string
-	// Sockets tracks the socket type children of this channel in the format of a map
-	// from socket channelz id to corresponding reference string.
-	// Note current grpc implementation doesn't allow channel having sockets directly,
-	// therefore, this is field is unused.
-	Sockets map[int64]string
-}
-
-// SubChannelMetric defines the info channelz provides for a specific SubChannel,
-// which includes ChannelInternalMetric and channelz-specific data, such as
-// channelz id, child list, etc.
-type SubChannelMetric struct {
-	// ID is the channelz id of this subchannel.
-	ID int64
-	// RefName is the human readable reference string of this subchannel.
-	RefName string
-	// ChannelData contains subchannel internal metric reported by the subchannel
-	// through ChannelzMetric().
-	ChannelData *ChannelInternalMetric
-	// NestedChans tracks the nested channel type children of this subchannel in the format of
-	// a map from nested channel channelz id to corresponding reference string.
-	// Note current grpc implementation doesn't allow subchannel to have nested channels
-	// as children, therefore, this field is unused.
-	NestedChans map[int64]string
-	// SubChans tracks the subchannel type children of this subchannel in the format of a
-	// map from subchannel channelz id to corresponding reference string.
-	// Note current grpc implementation doesn't allow subchannel to have subchannels
-	// as children, therefore, this field is unused.
-	SubChans map[int64]string
-	// Sockets tracks the socket type children of this subchannel in the format of a map
-	// from socket channelz id to corresponding reference string.
-	Sockets map[int64]string
-}
-
-// ChannelInternalMetric defines the struct that the implementor of Channel interface
-// should return from ChannelzMetric().
-type ChannelInternalMetric struct {
-	// current connectivity state of the channel.
-	State connectivity.State
-	// The target this channel originally tried to connect to.  May be absent
-	Target string
-	// The number of calls started on the channel.
-	CallsStarted int64
-	// The number of calls that have completed with an OK status.
-	CallsSucceeded int64
-	// The number of calls that have a completed with a non-OK status.
-	CallsFailed int64
-	// The last time a call was started on the channel.
-	LastCallStartedTimestamp time.Time
-	//TODO: trace
-}
-
-// Channel is the interface that should be satisfied in order to be tracked by
-// channelz as Channel or SubChannel.
-type Channel interface {
-	ChannelzMetric() *ChannelInternalMetric
-}
-
-type channel struct {
-	refName     string
-	c           Channel
-	closeCalled bool
-	nestedChans map[int64]string
-	subChans    map[int64]string
-	id          int64
-	pid         int64
-	cm          *channelMap
-}
-
-func (c *channel) addChild(id int64, e entry) {
-	switch v := e.(type) {
-	case *subChannel:
-		c.subChans[id] = v.refName
-	case *channel:
-		c.nestedChans[id] = v.refName
-	default:
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
-	}
-}
-
-func (c *channel) deleteChild(id int64) {
-	delete(c.subChans, id)
-	delete(c.nestedChans, id)
-	c.deleteSelfIfReady()
-}
-
-func (c *channel) triggerDelete() {
-	c.closeCalled = true
-	c.deleteSelfIfReady()
-}
-
-func (c *channel) deleteSelfIfReady() {
-	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
-		return
-	}
-	c.cm.deleteEntry(c.id)
-	// not top channel
-	if c.pid != 0 {
-		c.cm.findEntry(c.pid).deleteChild(c.id)
-	}
-}
-
-type subChannel struct {
-	refName     string
-	c           Channel
-	closeCalled bool
-	sockets     map[int64]string
-	id          int64
-	pid         int64
-	cm          *channelMap
-}
-
-func (sc *subChannel) addChild(id int64, e entry) {
-	if v, ok := e.(*normalSocket); ok {
-		sc.sockets[id] = v.refName
-	} else {
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
-	}
-}
-
-func (sc *subChannel) deleteChild(id int64) {
-	delete(sc.sockets, id)
-	sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) triggerDelete() {
-	sc.closeCalled = true
-	sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) deleteSelfIfReady() {
-	if !sc.closeCalled || len(sc.sockets) != 0 {
-		return
-	}
-	sc.cm.deleteEntry(sc.id)
-	sc.cm.findEntry(sc.pid).deleteChild(sc.id)
-}
-
-// SocketMetric defines the info channelz provides for a specific Socket, which
-// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
-type SocketMetric struct {
-	// ID is the channelz id of this socket.
-	ID int64
-	// RefName is the human readable reference string of this socket.
-	RefName string
-	// SocketData contains socket internal metric reported by the socket through
-	// ChannelzMetric().
-	SocketData *SocketInternalMetric
-}
-
-// SocketInternalMetric defines the struct that the implementor of Socket interface
-// should return from ChannelzMetric().
-type SocketInternalMetric struct {
-	// The number of streams that have been started.
-	StreamsStarted int64
-	// The number of streams that have ended successfully:
-	// On client side, receiving frame with eos bit set.
-	// On server side, sending frame with eos bit set.
-	StreamsSucceeded int64
-	// The number of streams that have ended unsuccessfully:
-	// On client side, termination without receiving frame with eos bit set.
-	// On server side, termination without sending frame with eos bit set.
-	StreamsFailed int64
-	// The number of messages successfully sent on this socket.
-	MessagesSent     int64
-	MessagesReceived int64
-	// The number of keep alives sent.  This is typically implemented with HTTP/2
-	// ping messages.
-	KeepAlivesSent int64
-	// The last time a stream was created by this endpoint.  Usually unset for
-	// servers.
-	LastLocalStreamCreatedTimestamp time.Time
-	// The last time a stream was created by the remote endpoint.  Usually unset
-	// for clients.
-	LastRemoteStreamCreatedTimestamp time.Time
-	// The last time a message was sent by this endpoint.
-	LastMessageSentTimestamp time.Time
-	// The last time a message was received by this endpoint.
-	LastMessageReceivedTimestamp time.Time
-	// The amount of window, granted to the local endpoint by the remote endpoint.
-	// This may be slightly out of date due to network latency.  This does NOT
-	// include stream level or TCP level flow control info.
-	LocalFlowControlWindow int64
-	// The amount of window, granted to the remote endpoint by the local endpoint.
-	// This may be slightly out of date due to network latency.  This does NOT
-	// include stream level or TCP level flow control info.
-	RemoteFlowControlWindow int64
-	// The locally bound address.
-	LocalAddr net.Addr
-	// The remote bound address.  May be absent.
-	RemoteAddr net.Addr
-	// Optional, represents the name of the remote endpoint, if different than
-	// the original target name.
-	RemoteName string
-	//TODO: socket options
-	//TODO: Security
-}
-
-// Socket is the interface that should be satisfied in order to be tracked by
-// channelz as Socket.
-type Socket interface {
-	ChannelzMetric() *SocketInternalMetric
-}
-
-type listenSocket struct {
-	refName string
-	s       Socket
-	id      int64
-	pid     int64
-	cm      *channelMap
-}
-
-func (ls *listenSocket) addChild(id int64, e entry) {
-	grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
-}
-
-func (ls *listenSocket) deleteChild(id int64) {
-	grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
-}
-
-func (ls *listenSocket) triggerDelete() {
-	ls.cm.deleteEntry(ls.id)
-	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
-}
-
-func (ls *listenSocket) deleteSelfIfReady() {
-	grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
-}
-
-type normalSocket struct {
-	refName string
-	s       Socket
-	id      int64
-	pid     int64
-	cm      *channelMap
-}
-
-func (ns *normalSocket) addChild(id int64, e entry) {
-	grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
-}
-
-func (ns *normalSocket) deleteChild(id int64) {
-	grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
-}
-
-func (ns *normalSocket) triggerDelete() {
-	ns.cm.deleteEntry(ns.id)
-	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
-}
-
-func (ns *normalSocket) deleteSelfIfReady() {
-	grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
-}
-
-// ServerMetric defines the info channelz provides for a specific Server, which
-// includes ServerInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ServerMetric struct {
-	// ID is the channelz id of this server.
-	ID int64
-	// RefName is the human readable reference string of this server.
-	RefName string
-	// ServerData contains server internal metric reported by the server through
-	// ChannelzMetric().
-	ServerData *ServerInternalMetric
-	// ListenSockets tracks the listener socket type children of this server in the
-	// format of a map from socket channelz id to corresponding reference string.
-	ListenSockets map[int64]string
-}
-
-// ServerInternalMetric defines the struct that the implementor of Server interface
-// should return from ChannelzMetric().
-type ServerInternalMetric struct {
-	// The number of incoming calls started on the server.
-	CallsStarted int64
-	// The number of incoming calls that have completed with an OK status.
-	CallsSucceeded int64
-	// The number of incoming calls that have a completed with a non-OK status.
-	CallsFailed int64
-	// The last time a call was started on the server.
-	LastCallStartedTimestamp time.Time
-	//TODO: trace
-}
-
-// Server is the interface to be satisfied in order to be tracked by channelz as
-// Server.
-type Server interface {
-	ChannelzMetric() *ServerInternalMetric
-}
-
-type server struct {
-	refName       string
-	s             Server
-	closeCalled   bool
-	sockets       map[int64]string
-	listenSockets map[int64]string
-	id            int64
-	cm            *channelMap
-}
-
-func (s *server) addChild(id int64, e entry) {
-	switch v := e.(type) {
-	case *normalSocket:
-		s.sockets[id] = v.refName
-	case *listenSocket:
-		s.listenSockets[id] = v.refName
-	default:
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
-	}
-}
-
-func (s *server) deleteChild(id int64) {
-	delete(s.sockets, id)
-	delete(s.listenSockets, id)
-	s.deleteSelfIfReady()
-}
-
-func (s *server) triggerDelete() {
-	s.closeCalled = true
-	s.deleteSelfIfReady()
-}
-
-func (s *server) deleteSelfIfReady() {
-	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
-		return
-	}
-	s.cm.deleteEntry(s.id)
-}
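
channelz/types.go is deleted here because the channelz implementation moves under google.golang.org/grpc/internal/channelz (see the import swap in clientconn.go below). The notable piece of the removed file is the deferred-deletion contract spelled out on the entry interface: triggerDelete only marks an entry, and the entry actually leaves the database once deleteSelfIfReady sees both closeCalled and an empty child list. A minimal sketch of that pattern with a hypothetical node type and a plain map registry (single goroutine, no locking, unlike the real channelMap):

package main

import "fmt"

// node is a hypothetical stand-in for a channelz entry: it is only removed
// from the registry once deletion was requested AND it has no children left.
type node struct {
	id          int64
	closeCalled bool
	children    map[int64]*node
	registry    map[int64]*node
}

func (n *node) addChild(c *node)     { n.children[c.id] = c }
func (n *node) deleteChild(id int64) { delete(n.children, id); n.deleteSelfIfReady() }
func (n *node) triggerDelete()       { n.closeCalled = true; n.deleteSelfIfReady() }

func (n *node) deleteSelfIfReady() {
	if !n.closeCalled || len(n.children) != 0 {
		return // deletion stays on hold until the last child goes away
	}
	delete(n.registry, n.id)
}

func main() {
	registry := map[int64]*node{}
	parent := &node{id: 1, children: map[int64]*node{}, registry: registry}
	child := &node{id: 2, children: map[int64]*node{}, registry: registry}
	registry[1], registry[2] = parent, child
	parent.addChild(child)

	parent.triggerDelete()     // parent still has a child, so it stays registered
	fmt.Println(len(registry)) // 2

	child.triggerDelete()      // child has no children, so it removes itself
	parent.deleteChild(2)      // once the parent loses it, the parent goes too
	fmt.Println(len(registry)) // 0
}
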
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index e8d95b4..bd2d2b3 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -19,6 +19,7 @@
 package grpc
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"math"
@@ -26,29 +27,32 @@
 	"reflect"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
-	"golang.org/x/net/context"
-	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
 	_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
-	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/envconfig"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
 	_ "google.golang.org/grpc/resolver/dns"         // To register dns resolver.
 	_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
-	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
 )
 
 const (
 	// minimum time to give a connection to complete
 	minConnectTimeout = 20 * time.Second
+	// must match grpclbName in grpclb/grpclb.go
+	grpclbName = "grpclb"
 )
 
 var (
@@ -62,15 +66,11 @@
 	errConnDrain = errors.New("grpc: the connection is drained")
 	// errConnClosing indicates that the connection is closing.
 	errConnClosing = errors.New("grpc: the connection is closing")
-	// errConnUnavailable indicates that the connection is unavailable.
-	errConnUnavailable = errors.New("grpc: the connection is unavailable")
 	// errBalancerClosed indicates that the balancer is closed.
 	errBalancerClosed = errors.New("grpc: balancer is closed")
-	// We use an accessor so that minConnectTimeout can be
-	// atomically read and updated while testing.
-	getMinConnectTimeout = func() time.Duration {
-		return minConnectTimeout
-	}
+	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
+	// service config.
+	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
 )
 
 // The following errors are returned from Dial and DialContext
@@ -79,353 +79,26 @@
 	// being set for ClientConn. Users should either set one or explicitly
 	// call WithInsecure DialOption to disable security.
 	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+	// errTransportCredsAndBundle indicates that creds bundle is used together
+	// with other individual Transport Credentials.
+	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
 	// errTransportCredentialsMissing indicates that users want to transmit security
-	// information (e.g., oauth2 token) which requires secure connection on an insecure
+	// information (e.g., OAuth2 token) which requires secure connection on an insecure
 	// connection.
 	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
 	// errCredentialsConflict indicates that grpc.WithTransportCredentials()
 	// and grpc.WithInsecure() are both called for a connection.
 	errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
-	// errNetworkIO indicates that the connection is down due to some network I/O error.
-	errNetworkIO = errors.New("grpc: failed with network I/O error")
 )
 
-// dialOptions configure a Dial call. dialOptions are set by the DialOption
-// values passed to Dial.
-type dialOptions struct {
-	unaryInt    UnaryClientInterceptor
-	streamInt   StreamClientInterceptor
-	cp          Compressor
-	dc          Decompressor
-	bs          backoffStrategy
-	block       bool
-	insecure    bool
-	timeout     time.Duration
-	scChan      <-chan ServiceConfig
-	copts       transport.ConnectOptions
-	callOptions []CallOption
-	// This is used by v1 balancer dial option WithBalancer to support v1
-	// balancer, and also by WithBalancerName dial option.
-	balancerBuilder balancer.Builder
-	// This is to support grpclb.
-	resolverBuilder      resolver.Builder
-	waitForHandshake     bool
-	channelzParentID     int64
-	disableServiceConfig bool
-}
-
 const (
 	defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
 	defaultClientMaxSendMessageSize    = math.MaxInt32
+	// http2IOBufSize specifies the buffer size for sending frames.
+	defaultWriteBufSize = 32 * 1024
+	defaultReadBufSize  = 32 * 1024
 )
 
-// RegisterChannelz turns on channelz service.
-// This is an EXPERIMENTAL API.
-func RegisterChannelz() {
-	channelz.TurnOn()
-}
-
-// DialOption configures how we set up the connection.
-type DialOption func(*dialOptions)
-
-// WithWaitForHandshake blocks until the initial settings frame is received from the
-// server before assigning RPCs to the connection.
-// Experimental API.
-func WithWaitForHandshake() DialOption {
-	return func(o *dialOptions) {
-		o.waitForHandshake = true
-	}
-}
-
-// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched
-// before doing a write on the wire.
-func WithWriteBufferSize(s int) DialOption {
-	return func(o *dialOptions) {
-		o.copts.WriteBufferSize = s
-	}
-}
-
-// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
-// for each read syscall.
-func WithReadBufferSize(s int) DialOption {
-	return func(o *dialOptions) {
-		o.copts.ReadBufferSize = s
-	}
-}
-
-// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
-// The lower bound for window size is 64K and any value smaller than that will be ignored.
-func WithInitialWindowSize(s int32) DialOption {
-	return func(o *dialOptions) {
-		o.copts.InitialWindowSize = s
-	}
-}
-
-// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection.
-// The lower bound for window size is 64K and any value smaller than that will be ignored.
-func WithInitialConnWindowSize(s int32) DialOption {
-	return func(o *dialOptions) {
-		o.copts.InitialConnWindowSize = s
-	}
-}
-
-// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive.
-//
-// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
-func WithMaxMsgSize(s int) DialOption {
-	return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
-}
-
-// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection.
-func WithDefaultCallOptions(cos ...CallOption) DialOption {
-	return func(o *dialOptions) {
-		o.callOptions = append(o.callOptions, cos...)
-	}
-}
-
-// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
-//
-// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead.
-func WithCodec(c Codec) DialOption {
-	return WithDefaultCallOptions(CallCustomCodec(c))
-}
-
-// WithCompressor returns a DialOption which sets a Compressor to use for
-// message compression. It has lower priority than the compressor set by
-// the UseCompressor CallOption.
-//
-// Deprecated: use UseCompressor instead.
-func WithCompressor(cp Compressor) DialOption {
-	return func(o *dialOptions) {
-		o.cp = cp
-	}
-}
-
-// WithDecompressor returns a DialOption which sets a Decompressor to use for
-// incoming message decompression.  If incoming response messages are encoded
-// using the decompressor's Type(), it will be used.  Otherwise, the message
-// encoding will be used to look up the compressor registered via
-// encoding.RegisterCompressor, which will then be used to decompress the
-// message.  If no compressor is registered for the encoding, an Unimplemented
-// status error will be returned.
-//
-// Deprecated: use encoding.RegisterCompressor instead.
-func WithDecompressor(dc Decompressor) DialOption {
-	return func(o *dialOptions) {
-		o.dc = dc
-	}
-}
-
-// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
-// Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and WithBalancerName.
-func WithBalancer(b Balancer) DialOption {
-	return func(o *dialOptions) {
-		o.balancerBuilder = &balancerWrapperBuilder{
-			b: b,
-		}
-	}
-}
-
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// This is an EXPERIMENTAL API.
-func WithBalancerName(balancerName string) DialOption {
-	builder := balancer.Get(balancerName)
-	if builder == nil {
-		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
-	}
-	return func(o *dialOptions) {
-		o.balancerBuilder = builder
-	}
-}
-
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
-	return func(o *dialOptions) {
-		o.resolverBuilder = b
-	}
-}
-
-// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
-//
-// Deprecated: service config should be received through name resolver, as specified here.
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-func WithServiceConfig(c <-chan ServiceConfig) DialOption {
-	return func(o *dialOptions) {
-		o.scChan = c
-	}
-}
-
-// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
-// when backing off after failed connection attempts.
-func WithBackoffMaxDelay(md time.Duration) DialOption {
-	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
-}
-
-// WithBackoffConfig configures the dialer to use the provided backoff
-// parameters after connection failures.
-//
-// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
-// for use.
-func WithBackoffConfig(b BackoffConfig) DialOption {
-	// Set defaults to ensure that provided BackoffConfig is valid and
-	// unexported fields get default values.
-	setDefaults(&b)
-	return withBackoff(b)
-}
-
-// withBackoff sets the backoff strategy used for connectRetryNum after a
-// failed connection attempt.
-//
-// This can be exported if arbitrary backoff strategies are allowed by gRPC.
-func withBackoff(bs backoffStrategy) DialOption {
-	return func(o *dialOptions) {
-		o.bs = bs
-	}
-}
-
-// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying
-// connection is up. Without this, Dial returns immediately and connecting the server
-// happens in background.
-func WithBlock() DialOption {
-	return func(o *dialOptions) {
-		o.block = true
-	}
-}
-
-// WithInsecure returns a DialOption which disables transport security for this ClientConn.
-// Note that transport security is required unless WithInsecure is set.
-func WithInsecure() DialOption {
-	return func(o *dialOptions) {
-		o.insecure = true
-	}
-}
-
-// WithTransportCredentials returns a DialOption which configures a
-// connection level security credentials (e.g., TLS/SSL).
-func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
-	return func(o *dialOptions) {
-		o.copts.TransportCredentials = creds
-	}
-}
-
-// WithPerRPCCredentials returns a DialOption which sets
-// credentials and places auth state on each outbound RPC.
-func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
-	return func(o *dialOptions) {
-		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
-	}
-}
-
-// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
-// initially. This is valid if and only if WithBlock() is present.
-//
-// Deprecated: use DialContext and context.WithTimeout instead.
-func WithTimeout(d time.Duration) DialOption {
-	return func(o *dialOptions) {
-		o.timeout = d
-	}
-}
-
-func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
-	return func(o *dialOptions) {
-		o.copts.Dialer = f
-	}
-}
-
-// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
-// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
-// Temporary() method to decide if it should try to reconnect to the network address.
-func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
-	return withContextDialer(
-		func(ctx context.Context, addr string) (net.Conn, error) {
-			if deadline, ok := ctx.Deadline(); ok {
-				return f(addr, deadline.Sub(time.Now()))
-			}
-			return f(addr, 0)
-		})
-}
-
-// WithStatsHandler returns a DialOption that specifies the stats handler
-// for all the RPCs and underlying network connections in this ClientConn.
-func WithStatsHandler(h stats.Handler) DialOption {
-	return func(o *dialOptions) {
-		o.copts.StatsHandler = h
-	}
-}
-
-// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors.
-// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
-// address and won't try to reconnect.
-// The default value of FailOnNonTempDialError is false.
-// This is an EXPERIMENTAL API.
-func FailOnNonTempDialError(f bool) DialOption {
-	return func(o *dialOptions) {
-		o.copts.FailOnNonTempDialError = f
-	}
-}
-
-// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
-func WithUserAgent(s string) DialOption {
-	return func(o *dialOptions) {
-		o.copts.UserAgent = s
-	}
-}
-
-// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport.
-func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
-	return func(o *dialOptions) {
-		o.copts.KeepaliveParams = kp
-	}
-}
-
-// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
-func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
-	return func(o *dialOptions) {
-		o.unaryInt = f
-	}
-}
-
-// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs.
-func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
-	return func(o *dialOptions) {
-		o.streamInt = f
-	}
-}
-
-// WithAuthority returns a DialOption that specifies the value to be used as
-// the :authority pseudo-header. This value only works with WithInsecure and
-// has no effect if TransportCredentials are present.
-func WithAuthority(a string) DialOption {
-	return func(o *dialOptions) {
-		o.copts.Authority = a
-	}
-}
-
-// WithChannelzParentID returns a DialOption that specifies the channelz ID of current ClientConn's
-// parent. This function is used in nested channel creation (e.g. grpclb dial).
-func WithChannelzParentID(id int64) DialOption {
-	return func(o *dialOptions) {
-		o.channelzParentID = id
-	}
-}
-
-// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
-// service config provided by the resolver and provides a hint to the resolver
-// to not fetch service configs.
-func WithDisableServiceConfig() DialOption {
-	return func(o *dialOptions) {
-		o.disableServiceConfig = true
-	}
-}
-
 // Dial creates a client connection to the given target.
 func Dial(target string, opts ...DialOption) (*ClientConn, error) {
 	return DialContext(context.Background(), target, opts...)
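
Dial is unchanged: it is DialContext with context.Background(). The hunk below reworks DialContext itself (a deferred Close on any setup error, channelz trace events, and validation that a credentials Bundle is not mixed with individual TransportCredentials). For orientation, a minimal caller-side sketch, assuming a plaintext server on localhost:50051 (hypothetical address):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// A bounded context so a blocking dial cannot hang forever.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// WithInsecure skips transport security (otherwise credentials are required,
	// see errNoTransportSecurity above); WithBlock makes DialContext wait until
	// the connection is Ready or the context expires.
	conn, err := grpc.DialContext(ctx, "localhost:50051", // hypothetical address
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	log.Printf("connection state: %v", conn.GetState())
}
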
@@ -449,32 +122,57 @@
 // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
 func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
 	cc := &ClientConn{
-		target: target,
-		csMgr:  &connectivityStateManager{},
-		conns:  make(map[*addrConn]struct{}),
-
-		blockingpicker: newPickerWrapper(),
+		target:            target,
+		csMgr:             &connectivityStateManager{},
+		conns:             make(map[*addrConn]struct{}),
+		dopts:             defaultDialOptions(),
+		blockingpicker:    newPickerWrapper(),
+		czData:            new(channelzData),
+		firstResolveEvent: grpcsync.NewEvent(),
 	}
+	cc.retryThrottler.Store((*retryThrottler)(nil))
 	cc.ctx, cc.cancel = context.WithCancel(context.Background())
 
 	for _, opt := range opts {
-		opt(&cc.dopts)
+		opt.apply(&cc.dopts)
 	}
 
+	defer func() {
+		if err != nil {
+			cc.Close()
+		}
+	}()
+
 	if channelz.IsOn() {
 		if cc.dopts.channelzParentID != 0 {
-			cc.channelzID = channelz.RegisterChannel(cc, cc.dopts.channelzParentID, target)
+			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
+			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
+				Desc:     "Channel Created",
+				Severity: channelz.CtINFO,
+				Parent: &channelz.TraceEventDesc{
+					Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
+					Severity: channelz.CtINFO,
+				},
+			})
 		} else {
-			cc.channelzID = channelz.RegisterChannel(cc, 0, target)
+			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
+			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
+				Desc:     "Channel Created",
+				Severity: channelz.CtINFO,
+			})
 		}
+		cc.csMgr.channelzID = cc.channelzID
 	}
 
 	if !cc.dopts.insecure {
-		if cc.dopts.copts.TransportCredentials == nil {
+		if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
 			return nil, errNoTransportSecurity
 		}
+		if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+			return nil, errTransportCredsAndBundle
+		}
 	} else {
-		if cc.dopts.copts.TransportCredentials != nil {
+		if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
 			return nil, errCredentialsConflict
 		}
 		for _, cd := range cc.dopts.copts.PerRPCCredentials {
@@ -484,13 +182,20 @@
 		}
 	}
 
+	if cc.dopts.defaultServiceConfigRawJSON != nil {
+		sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+		if err != nil {
+			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err)
+		}
+		cc.dopts.defaultServiceConfig = sc
+	}
 	cc.mkp = cc.dopts.copts.KeepaliveParams
 
 	if cc.dopts.copts.Dialer == nil {
 		cc.dopts.copts.Dialer = newProxyDialer(
 			func(ctx context.Context, addr string) (net.Conn, error) {
 				network, addr := parseDialTarget(addr)
-				return dialContext(ctx, network, addr)
+				return (&net.Dialer{}).DialContext(ctx, network, addr)
 			},
 		)
 	}
@@ -506,17 +211,12 @@
 		ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
 		defer cancel()
 	}
-
 	defer func() {
 		select {
 		case <-ctx.Done():
 			conn, err = nil, ctx.Err()
 		default:
 		}
-
-		if err != nil {
-			cc.Close()
-		}
 	}()
 
 	scSet := false
@@ -525,14 +225,16 @@
 		select {
 		case sc, ok := <-cc.dopts.scChan:
 			if ok {
-				cc.sc = sc
+				cc.sc = &sc
 				scSet = true
 			}
 		default:
 		}
 	}
 	if cc.dopts.bs == nil {
-		cc.dopts.bs = DefaultBackoffConfig
+		cc.dopts.bs = backoff.Exponential{
+			MaxDelay: DefaultBackoffConfig.MaxDelay,
+		}
 	}
 	if cc.dopts.resolverBuilder == nil {
 		// Only try to parse target when resolver builder is not already set.
@@ -540,9 +242,9 @@
 		grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
 		cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
 		if cc.dopts.resolverBuilder == nil {
-			// If resolver builder is still nil, the parse target's scheme is
+			// If resolver builder is still nil, the parsed target's scheme is
 			// not registered. Fallback to default resolver and set Endpoint to
-			// the original unparsed target.
+			// the original target.
 			grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
 			cc.parsedTarget = resolver.Target{
 				Scheme:   resolver.GetDefaultScheme(),
@@ -556,8 +258,8 @@
 	creds := cc.dopts.copts.TransportCredentials
 	if creds != nil && creds.Info().ServerName != "" {
 		cc.authority = creds.Info().ServerName
-	} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
-		cc.authority = cc.dopts.copts.Authority
+	} else if cc.dopts.insecure && cc.dopts.authority != "" {
+		cc.authority = cc.dopts.authority
 	} else {
 		// Use endpoint from "scheme://authority/endpoint" as the default
 		// authority for ClientConn.
@@ -569,7 +271,7 @@
 		select {
 		case sc, ok := <-cc.dopts.scChan:
 			if ok {
-				cc.sc = sc
+				cc.sc = &sc
 			}
 		case <-ctx.Done():
 			return nil, ctx.Err()
@@ -585,30 +287,35 @@
 	}
 	cc.balancerBuildOpts = balancer.BuildOptions{
 		DialCreds:        credsClone,
+		CredsBundle:      cc.dopts.copts.CredsBundle,
 		Dialer:           cc.dopts.copts.Dialer,
 		ChannelzParentID: cc.channelzID,
 	}
 
 	// Build the resolver.
-	cc.resolverWrapper, err = newCCResolverWrapper(cc)
+	rWrapper, err := newCCResolverWrapper(cc)
 	if err != nil {
 		return nil, fmt.Errorf("failed to build resolver: %v", err)
 	}
-	// Start the resolver wrapper goroutine after resolverWrapper is created.
-	//
-	// If the goroutine is started before resolverWrapper is ready, the
-	// following may happen: The goroutine sends updates to cc. cc forwards
-	// those to balancer. Balancer creates new addrConn. addrConn fails to
-	// connect, and calls resolveNow(). resolveNow() tries to use the non-ready
-	// resolverWrapper.
-	cc.resolverWrapper.start()
 
+	cc.mu.Lock()
+	cc.resolverWrapper = rWrapper
+	cc.mu.Unlock()
 	// A blocking dial blocks until the clientConn is ready.
 	if cc.dopts.block {
 		for {
 			s := cc.GetState()
 			if s == connectivity.Ready {
 				break
+			} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
+				if err = cc.blockingpicker.connectionError(); err != nil {
+					terr, ok := err.(interface {
+						Temporary() bool
+					})
+					if ok && !terr.Temporary() {
+						return nil, err
+					}
+				}
 			}
 			if !cc.WaitForStateChange(ctx, s) {
 				// ctx got timeout or canceled.
@@ -626,6 +333,7 @@
 	mu         sync.Mutex
 	state      connectivity.State
 	notifyChan chan struct{}
+	channelzID int64
 }
 
 // updateState updates the connectivity.State of ClientConn.
@@ -641,6 +349,12 @@
 		return
 	}
 	csm.state = state
+	if channelz.IsOn() {
+		channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Channel Connectivity change to %v", state),
+			Severity: channelz.CtINFO,
+		})
+	}
 	if csm.notifyChan != nil {
 		// There are other goroutines waiting on this channel.
 		close(csm.notifyChan)
@@ -675,26 +389,22 @@
 	csMgr        *connectivityStateManager
 
 	balancerBuildOpts balancer.BuildOptions
-	resolverWrapper   *ccResolverWrapper
 	blockingpicker    *pickerWrapper
 
-	mu    sync.RWMutex
-	sc    ServiceConfig
-	scRaw string
-	conns map[*addrConn]struct{}
+	mu              sync.RWMutex
+	resolverWrapper *ccResolverWrapper
+	sc              *ServiceConfig
+	conns           map[*addrConn]struct{}
 	// Keepalive parameter can be updated if a GoAway is received.
 	mkp             keepalive.ClientParameters
 	curBalancerName string
-	preBalancerName string // previous balancer name.
-	curAddresses    []resolver.Address
 	balancerWrapper *ccBalancerWrapper
+	retryThrottler  atomic.Value
 
-	channelzID          int64 // channelz unique identification number
-	czmu                sync.RWMutex
-	callsStarted        int64
-	callsSucceeded      int64
-	callsFailed         int64
-	lastCallStartedTime time.Time
+	firstResolveEvent *grpcsync.Event
+
+	channelzID int64 // channelz unique identification number
+	czData     *channelzData
 }
 
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
@@ -728,9 +438,8 @@
 			}
 			cc.mu.Lock()
 			// TODO: load balance policy runtime change is ignored.
-			// We may revist this decision in the future.
-			cc.sc = sc
-			cc.scRaw = ""
+			// We may revisit this decision in the future.
+			cc.sc = &sc
 			cc.mu.Unlock()
 		case <-cc.ctx.Done():
 			return
@@ -738,49 +447,91 @@
 	}
 }
 
-func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
+// waitForResolvedAddrs blocks until the resolver has provided addresses or the
+// context expires.  Returns nil unless the context expires first; otherwise
+// returns a status error based on the context.
+func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
+	// This is on the RPC path, so we use a fast path to avoid the
+	// more-expensive "select" below after the resolver has returned once.
+	if cc.firstResolveEvent.HasFired() {
+		return nil
+	}
+	select {
+	case <-cc.firstResolveEvent.Done():
+		return nil
+	case <-ctx.Done():
+		return status.FromContextError(ctx.Err()).Err()
+	case <-cc.ctx.Done():
+		return ErrClientConnClosing
+	}
+}
+
+// gRPC should resort to default service config when:
+// * resolver service config is disabled
+// * or, resolver does not return a service config or returns an invalid one.
+func (cc *ClientConn) fallbackToDefaultServiceConfig(sc string) bool {
+	if cc.dopts.disableServiceConfig {
+		return true
+	}
+	// The logic below is temporary, will be removed once we change the resolver.State ServiceConfig field type.
+	// Right now, we assume that empty service config string means resolver does not return a config.
+	if sc == "" {
+		return true
+	}
+	// TODO: the logic below is temporary. Once we finish the logic to validate service config
+	// in resolver, we will replace the logic below.
+	_, err := parseServiceConfig(sc)
+	return err != nil
+}
+
+func (cc *ClientConn) updateResolverState(s resolver.State) error {
 	cc.mu.Lock()
 	defer cc.mu.Unlock()
+	// Check if the ClientConn is already closed. Some fields (e.g.
+	// balancerWrapper) are set to nil when closing the ClientConn, and could
+	// cause nil pointer panic if we don't have this check.
 	if cc.conns == nil {
-		// cc was closed.
-		return
+		return nil
 	}
 
-	if reflect.DeepEqual(cc.curAddresses, addrs) {
-		return
+	if cc.fallbackToDefaultServiceConfig(s.ServiceConfig) {
+		if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
+			cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
+		}
+	} else {
+		// TODO: the parsing logic below will be moved inside resolver.
+		sc, err := parseServiceConfig(s.ServiceConfig)
+		if err != nil {
+			return err
+		}
+		if cc.sc == nil || cc.sc.rawJSONString != s.ServiceConfig {
+			cc.applyServiceConfig(sc)
+		}
 	}
 
-	cc.curAddresses = addrs
+	// update the service config that will be sent to balancer.
+	if cc.sc != nil {
+		s.ServiceConfig = cc.sc.rawJSONString
+	}
 
 	if cc.dopts.balancerBuilder == nil {
 		// Only look at balancer types and switch balancer if balancer dial
 		// option is not set.
 		var isGRPCLB bool
-		for _, a := range addrs {
+		for _, a := range s.Addresses {
 			if a.Type == resolver.GRPCLB {
 				isGRPCLB = true
 				break
 			}
 		}
 		var newBalancerName string
+		// TODO: use new loadBalancerConfig field with appropriate priority.
 		if isGRPCLB {
 			newBalancerName = grpclbName
+		} else if cc.sc != nil && cc.sc.LB != nil {
+			newBalancerName = *cc.sc.LB
 		} else {
-			// Address list doesn't contain grpclb address. Try to pick a
-			// non-grpclb balancer.
-			newBalancerName = cc.curBalancerName
-			// If current balancer is grpclb, switch to the previous one.
-			if newBalancerName == grpclbName {
-				newBalancerName = cc.preBalancerName
-			}
-			// The following could be true in two cases:
-			// - the first time handling resolved addresses
-			//   (curBalancerName="")
-			// - the first time handling non-grpclb addresses
-			//   (curBalancerName="grpclb", preBalancerName="")
-			if newBalancerName == "" {
-				newBalancerName = PickFirstBalancerName
-			}
+			newBalancerName = PickFirstBalancerName
 		}
 		cc.switchBalancer(newBalancerName)
 	} else if cc.balancerWrapper == nil {
@@ -789,7 +540,9 @@
 		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
 	}
 
-	cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
+	cc.balancerWrapper.updateResolverState(s)
+	cc.firstResolveEvent.Fire()
+	return nil
 }
 
 // switchBalancer starts the switching from current balancer to the balancer
@@ -801,10 +554,6 @@
 //
 // Caller must hold cc.mu.
 func (cc *ClientConn) switchBalancer(name string) {
-	if cc.conns == nil {
-		return
-	}
-
 	if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
 		return
 	}
@@ -814,20 +563,29 @@
 		grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
 		return
 	}
-	// TODO(bar switching) change this to two steps: drain and close.
-	// Keep track of sc in wrapper.
 	if cc.balancerWrapper != nil {
 		cc.balancerWrapper.close()
 	}
-	// Clear all stickiness state.
-	cc.blockingpicker.clearStickinessState()
 
 	builder := balancer.Get(name)
+	if channelz.IsOn() {
+		if builder == nil {
+			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName),
+				Severity: channelz.CtWarning,
+			})
+		} else {
+			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Channel switches to new LB policy %q", name),
+				Severity: channelz.CtINFO,
+			})
+		}
+	}
 	if builder == nil {
 		grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
 		builder = newPickfirstBuilder()
 	}
-	cc.preBalancerName = cc.curBalancerName
+
 	cc.curBalancerName = builder.Name()
 	cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
 }
@@ -847,11 +605,14 @@
 // newAddrConn creates an addrConn for addrs and adds it to cc.conns.
 //
 // Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
+func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
 	ac := &addrConn{
-		cc:    cc,
-		addrs: addrs,
-		dopts: cc.dopts,
+		cc:           cc,
+		addrs:        addrs,
+		scopts:       opts,
+		dopts:        cc.dopts,
+		czData:       new(channelzData),
+		resetBackoff: make(chan struct{}),
 	}
 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
 	// Track ac in cc. This needs to be done before any getTransport(...) is called.
@@ -862,6 +623,14 @@
 	}
 	if channelz.IsOn() {
 		ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
+		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+			Desc:     "Subchannel Created",
+			Severity: channelz.CtINFO,
+			Parent: &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
+				Severity: channelz.CtINFO,
+			},
+		})
 	}
 	cc.conns[ac] = struct{}{}
 	cc.mu.Unlock()
@@ -881,47 +650,39 @@
 	ac.tearDown(err)
 }
 
-// ChannelzMetric returns ChannelInternalMetric of current ClientConn.
-// This is an EXPERIMENTAL API.
-func (cc *ClientConn) ChannelzMetric() *channelz.ChannelInternalMetric {
-	state := cc.GetState()
-	cc.czmu.RLock()
-	defer cc.czmu.RUnlock()
+func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
 	return &channelz.ChannelInternalMetric{
-		State:                    state,
+		State:                    cc.GetState(),
 		Target:                   cc.target,
-		CallsStarted:             cc.callsStarted,
-		CallsSucceeded:           cc.callsSucceeded,
-		CallsFailed:              cc.callsFailed,
-		LastCallStartedTimestamp: cc.lastCallStartedTime,
+		CallsStarted:             atomic.LoadInt64(&cc.czData.callsStarted),
+		CallsSucceeded:           atomic.LoadInt64(&cc.czData.callsSucceeded),
+		CallsFailed:              atomic.LoadInt64(&cc.czData.callsFailed),
+		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
 	}
 }
 
+// Target returns the target string of the ClientConn.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) Target() string {
+	return cc.target
+}
+
 func (cc *ClientConn) incrCallsStarted() {
-	cc.czmu.Lock()
-	cc.callsStarted++
-	// TODO(yuxuanli): will make this a time.Time pointer improve performance?
-	cc.lastCallStartedTime = time.Now()
-	cc.czmu.Unlock()
+	atomic.AddInt64(&cc.czData.callsStarted, 1)
+	atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
 }
 
 func (cc *ClientConn) incrCallsSucceeded() {
-	cc.czmu.Lock()
-	cc.callsSucceeded++
-	cc.czmu.Unlock()
+	atomic.AddInt64(&cc.czData.callsSucceeded, 1)
 }
 
 func (cc *ClientConn) incrCallsFailed() {
-	cc.czmu.Lock()
-	cc.callsFailed++
-	cc.czmu.Unlock()
+	atomic.AddInt64(&cc.czData.callsFailed, 1)
 }
 
-// connect starts to creating transport and also starts the transport monitor
-// goroutine for this ac.
+// connect starts creating a transport.
 // It does nothing if the ac is not IDLE.
 // TODO(bar) Move this to the addrConn section.
-// This was part of resetAddrConn, keep it here to make the diff look clean.
 func (ac *addrConn) connect() error {
 	ac.mu.Lock()
 	if ac.state == connectivity.Shutdown {
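
incrCallsStarted and friends above drop the czmu mutex in favor of sync/atomic operations on the new czData fields, with the last-call timestamp stored as UnixNano so it can be read atomically as well. A minimal sketch of that counter pattern, using a hypothetical callStats type in place of the real channelzData:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// callStats is a hypothetical stand-in for channelzData: plain int64 fields
// that are only ever touched through the sync/atomic package.
type callStats struct {
	callsStarted        int64
	callsSucceeded      int64
	lastCallStartedTime int64 // nanoseconds since the Unix epoch
}

func (s *callStats) incrStarted() {
	atomic.AddInt64(&s.callsStarted, 1)
	atomic.StoreInt64(&s.lastCallStartedTime, time.Now().UnixNano())
}

func (s *callStats) incrSucceeded() { atomic.AddInt64(&s.callsSucceeded, 1) }

func (s *callStats) snapshot() (started, succeeded int64, last time.Time) {
	return atomic.LoadInt64(&s.callsStarted),
		atomic.LoadInt64(&s.callsSucceeded),
		time.Unix(0, atomic.LoadInt64(&s.lastCallStartedTime))
}

func main() {
	var s callStats
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.incrStarted()
			s.incrSucceeded()
		}()
	}
	wg.Wait()
	started, succeeded, last := s.snapshot()
	fmt.Println(started, succeeded, last.Format(time.RFC3339)) // 100 100 <timestamp>
}
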
@@ -932,22 +693,11 @@
 		ac.mu.Unlock()
 		return nil
 	}
-	ac.state = connectivity.Connecting
-	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+	ac.updateConnectivityState(connectivity.Connecting)
 	ac.mu.Unlock()
 
 	// Start a goroutine connecting to the server asynchronously.
-	go func() {
-		if err := ac.resetTransport(); err != nil {
-			grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
-			if err != errConnClosing {
-				// Keep this ac in cc.conns, to get the reason it's torn down.
-				ac.tearDown(err)
-			}
-			return
-		}
-		ac.transportMonitor()
-	}()
+	go ac.resetTransport()
 	return nil
 }
 
@@ -966,6 +716,12 @@
 		return true
 	}
 
+	// Unless we're busy reconnecting already, let's reconnect from the top of
+	// the list.
+	if ac.state != connectivity.Ready {
+		return false
+	}
+
 	var curAddrFound bool
 	for _, a := range addrs {
 		if reflect.DeepEqual(ac.curAddr, a) {
@@ -976,7 +732,6 @@
 	grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
 	if curAddrFound {
 		ac.addrs = addrs
-		ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list.
 	}
 
 	return curAddrFound
@@ -993,6 +748,9 @@
 	// TODO: Avoid the locking here.
 	cc.mu.RLock()
 	defer cc.mu.RUnlock()
+	if cc.sc == nil {
+		return MethodConfig{}
+	}
 	m, ok := cc.sc.Methods[method]
 	if !ok {
 		i := strings.LastIndex(method, "/")
@@ -1001,66 +759,75 @@
 	return m
 }
 
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{})
+func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
+	cc.mu.RLock()
+	defer cc.mu.RUnlock()
+	if cc.sc == nil {
+		return nil
+	}
+	return cc.sc.healthCheckConfig
+}
+
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
+		FullMethodName: method,
+	})
 	if err != nil {
 		return nil, nil, toRPCErr(err)
 	}
 	return t, done, nil
 }
 
-// handleServiceConfig parses the service config string in JSON format to Go native
-// struct ServiceConfig, and store both the struct and the JSON string in ClientConn.
-func (cc *ClientConn) handleServiceConfig(js string) error {
-	if cc.dopts.disableServiceConfig {
-		return nil
+func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
+	if sc == nil {
+		// should never reach here.
+		return fmt.Errorf("got nil pointer for service config")
 	}
-	sc, err := parseServiceConfig(js)
-	if err != nil {
-		return err
-	}
-	cc.mu.Lock()
-	cc.scRaw = js
 	cc.sc = sc
-	if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
-		if cc.curBalancerName == grpclbName {
-			// If current balancer is grpclb, there's at least one grpclb
-			// balancer address in the resolved list. Don't switch the balancer,
-			// but change the previous balancer name, so if a new resolved
-			// address list doesn't contain grpclb address, balancer will be
-			// switched to *sc.LB.
-			cc.preBalancerName = *sc.LB
-		} else {
-			cc.switchBalancer(*sc.LB)
-			cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
+
+	if cc.sc.retryThrottling != nil {
+		newThrottler := &retryThrottler{
+			tokens: cc.sc.retryThrottling.MaxTokens,
+			max:    cc.sc.retryThrottling.MaxTokens,
+			thresh: cc.sc.retryThrottling.MaxTokens / 2,
+			ratio:  cc.sc.retryThrottling.TokenRatio,
 		}
+		cc.retryThrottler.Store(newThrottler)
+	} else {
+		cc.retryThrottler.Store((*retryThrottler)(nil))
 	}
 
-	if envConfigStickinessOn {
-		var newStickinessMDKey string
-		if sc.stickinessMetadataKey != nil && *sc.stickinessMetadataKey != "" {
-			newStickinessMDKey = *sc.stickinessMetadataKey
-		}
-		// newStickinessMDKey is "" if one of the following happens:
-		// - stickinessMetadataKey is set to ""
-		// - stickinessMetadataKey field doesn't exist in service config
-		cc.blockingpicker.updateStickinessMDKey(strings.ToLower(newStickinessMDKey))
-	}
-
-	cc.mu.Unlock()
 	return nil
 }
 
 func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
-	cc.mu.Lock()
+	cc.mu.RLock()
 	r := cc.resolverWrapper
-	cc.mu.Unlock()
+	cc.mu.RUnlock()
 	if r == nil {
 		return
 	}
 	go r.resolveNow(o)
 }
 
+// ResetConnectBackoff wakes up all subchannels in transient failure and causes
+// them to attempt another connection immediately.  It also resets the backoff
+// times used for subsequent attempts regardless of the current state.
+//
+// In general, this function should not be used.  Typical service or network
+// outages result in a reasonable client reconnection strategy by default.
+// However, if a previously unavailable network becomes available, this may be
+// used to trigger an immediate reconnect.
+//
+// This API is EXPERIMENTAL.
+func (cc *ClientConn) ResetConnectBackoff() {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	for ac := range cc.conns {
+		ac.resetConnectBackoff()
+	}
+}
+
 // Close tears down the ClientConn and all underlying connections.
 func (cc *ClientConn) Close() error {
 	defer cc.cancel()
@@ -1093,6 +860,19 @@
 		ac.tearDown(ErrClientConnClosing)
 	}
 	if channelz.IsOn() {
+		ted := &channelz.TraceEventDesc{
+			Desc:     "Channel Deleted",
+			Severity: channelz.CtINFO,
+		}
+		if cc.dopts.channelzParentID != 0 {
+			ted.Parent = &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
+				Severity: channelz.CtINFO,
+			}
+		}
+		channelz.AddTraceEvent(cc.channelzID, ted)
+		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
+		// the entity being deleted, and thus prevent it from being deleted right away.
 		channelz.RemoveEntry(cc.channelzID)
 	}
 	return nil
@@ -1104,37 +884,45 @@
 	cancel context.CancelFunc
 
 	cc     *ClientConn
-	addrs  []resolver.Address
 	dopts  dialOptions
-	events trace.EventLog
 	acbw   balancer.SubConn
+	scopts balancer.NewSubConnOptions
 
-	mu           sync.Mutex
-	curAddr      resolver.Address
-	reconnectIdx int // The index in addrs list to start reconnecting from.
-	state        connectivity.State
-	// ready is closed and becomes nil when a new transport is up or failed
-	// due to timeout.
-	ready     chan struct{}
-	transport transport.ClientTransport
+	// transport is set when there's a viable transport (note: ac state may not be READY as LB channel
+	// health checking may require server to report healthy to set ac to READY), and is reset
+	// to nil when the current transport should no longer be used to create a stream (e.g. after GoAway
+	// is received, transport is closed, ac has been torn down).
+	transport transport.ClientTransport // The current transport.
 
-	// The reason this addrConn is torn down.
-	tearDownErr error
+	mu      sync.Mutex
+	curAddr resolver.Address   // The current address.
+	addrs   []resolver.Address // All addresses that the resolver resolved to.
 
-	connectRetryNum int
-	// backoffDeadline is the time until which resetTransport needs to
-	// wait before increasing connectRetryNum count.
-	backoffDeadline time.Time
-	// connectDeadline is the time by which all connection
-	// negotiations must complete.
-	connectDeadline time.Time
+	// Use updateConnectivityState for updating addrConn's connectivity state.
+	state connectivity.State
 
-	channelzID          int64 // channelz unique identification number
-	czmu                sync.RWMutex
-	callsStarted        int64
-	callsSucceeded      int64
-	callsFailed         int64
-	lastCallStartedTime time.Time
+	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
+	resetBackoff chan struct{}
+
+	channelzID int64 // channelz unique identification number.
+	czData     *channelzData
+}
+
+// Note: this requires a lock on ac.mu.
+func (ac *addrConn) updateConnectivityState(s connectivity.State) {
+	if ac.state == s {
+		return
+	}
+
+	updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s)
+	ac.state = s
+	if channelz.IsOn() {
+		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+			Desc:     updateMsg,
+			Severity: channelz.CtINFO,
+		})
+	}
+	ac.cc.handleSubConnStateChange(ac.acbw, s)
 }
 
 // adjustParams updates parameters used to create transports upon
@@ -1151,329 +939,269 @@
 	}
 }
 
-// printf records an event in ac's event log, unless ac has been closed.
-// REQUIRES ac.mu is held.
-func (ac *addrConn) printf(format string, a ...interface{}) {
-	if ac.events != nil {
-		ac.events.Printf(format, a...)
-	}
-}
-
-// errorf records an error in ac's event log, unless ac has been closed.
-// REQUIRES ac.mu is held.
-func (ac *addrConn) errorf(format string, a ...interface{}) {
-	if ac.events != nil {
-		ac.events.Errorf(format, a...)
-	}
-}
-
-// resetTransport recreates a transport to the address for ac.  The old
-// transport will close itself on error or when the clientconn is closed.
-// The created transport must receive initial settings frame from the server.
-// In case that doesn't happen, transportMonitor will kill the newly created
-// transport after connectDeadline has expired.
-// In case there was an error on the transport before the settings frame was
-// received, resetTransport resumes connecting to backends after the one that
-// was previously connected to. In case end of the list is reached, resetTransport
-// backs off until the original deadline.
-// If the DialOption WithWaitForHandshake was set, resetTrasport returns
-// successfully only after server settings are received.
-//
-// TODO(bar) make sure all state transitions are valid.
-func (ac *addrConn) resetTransport() error {
-	ac.mu.Lock()
-	if ac.state == connectivity.Shutdown {
-		ac.mu.Unlock()
-		return errConnClosing
-	}
-	if ac.ready != nil {
-		close(ac.ready)
-		ac.ready = nil
-	}
-	ac.transport = nil
-	ridx := ac.reconnectIdx
-	ac.mu.Unlock()
-	ac.cc.mu.RLock()
-	ac.dopts.copts.KeepaliveParams = ac.cc.mkp
-	ac.cc.mu.RUnlock()
-	var backoffDeadline, connectDeadline time.Time
-	for connectRetryNum := 0; ; connectRetryNum++ {
-		ac.mu.Lock()
-		if ac.backoffDeadline.IsZero() {
-			// This means either a successful HTTP2 connection was established
-			// or this is the first time this addrConn is trying to establish a
-			// connection.
-			backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration.
-			// This will be the duration that dial gets to finish.
-			dialDuration := getMinConnectTimeout()
-			if backoffFor > dialDuration {
-				// Give dial more time as we keep failing to connect.
-				dialDuration = backoffFor
-			}
-			start := time.Now()
-			backoffDeadline = start.Add(backoffFor)
-			connectDeadline = start.Add(dialDuration)
-			ridx = 0 // Start connecting from the beginning.
-		} else {
-			// Continue trying to connect with the same deadlines.
-			connectRetryNum = ac.connectRetryNum
-			backoffDeadline = ac.backoffDeadline
-			connectDeadline = ac.connectDeadline
-			ac.backoffDeadline = time.Time{}
-			ac.connectDeadline = time.Time{}
-			ac.connectRetryNum = 0
+func (ac *addrConn) resetTransport() {
+	for i := 0; ; i++ {
+		if i > 0 {
+			ac.cc.resolveNow(resolver.ResolveNowOption{})
 		}
+
+		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
 			ac.mu.Unlock()
-			return errConnClosing
+			return
 		}
-		ac.printf("connecting")
-		if ac.state != connectivity.Connecting {
-			ac.state = connectivity.Connecting
-			ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-		}
-		// copy ac.addrs in case of race
-		addrsIter := make([]resolver.Address, len(ac.addrs))
-		copy(addrsIter, ac.addrs)
-		copts := ac.dopts.copts
-		ac.mu.Unlock()
-		connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts)
-		if err != nil {
-			return err
-		}
-		if connected {
-			return nil
-		}
-	}
-}
 
-// createTransport creates a connection to one of the backends in addrs.
-// It returns true if a connection was established.
-func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) {
-	for i := ridx; i < len(addrs); i++ {
-		addr := addrs[i]
-		target := transport.TargetInfo{
-			Addr:      addr.Addr,
-			Metadata:  addr.Metadata,
-			Authority: ac.cc.authority,
+		addrs := ac.addrs
+		backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+		// This will be the duration that dial gets to finish.
+		dialDuration := minConnectTimeout
+		if ac.dopts.minConnectTimeout != nil {
+			dialDuration = ac.dopts.minConnectTimeout()
 		}
-		done := make(chan struct{})
-		onPrefaceReceipt := func() {
-			ac.mu.Lock()
-			close(done)
-			if !ac.backoffDeadline.IsZero() {
-				// If we haven't already started reconnecting to
-				// other backends.
-				// Note, this can happen when writer notices an error
-				// and triggers resetTransport while at the same time
-				// reader receives the preface and invokes this closure.
-				ac.backoffDeadline = time.Time{}
-				ac.connectDeadline = time.Time{}
-				ac.connectRetryNum = 0
-			}
-			ac.mu.Unlock()
+
+		if dialDuration < backoffFor {
+			// Give dial more time as we keep failing to connect.
+			dialDuration = backoffFor
 		}
-		// Do not cancel in the success path because of
-		// this issue in Go1.6: https://github.com/golang/go/issues/15078.
-		connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
-		if channelz.IsOn() {
-			copts.ChannelzParentID = ac.channelzID
-		}
-		newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
+		// We can potentially spend all the time trying the first address, and
+		// if the server accepts the connection and then hangs, the following
+		// addresses will never be tried.
+		//
+		// The spec doesn't mention what should be done for multiple addresses.
+		// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+		connectDeadline := time.Now().Add(dialDuration)
+		ac.mu.Unlock()
+
+		newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
 		if err != nil {
-			cancel()
-			ac.cc.blockingpicker.updateConnectionError(err)
+			// After exhausting all addresses, the addrConn enters
+			// TRANSIENT_FAILURE.
 			ac.mu.Lock()
 			if ac.state == connectivity.Shutdown {
-				// ac.tearDown(...) has been invoked.
 				ac.mu.Unlock()
-				return false, errConnClosing
+				return
 			}
+			ac.updateConnectivityState(connectivity.TransientFailure)
+
+			// Backoff.
+			b := ac.resetBackoff
 			ac.mu.Unlock()
-			grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
+
+			timer := time.NewTimer(backoffFor)
+			select {
+			case <-timer.C:
+				ac.mu.Lock()
+				ac.backoffIdx++
+				ac.mu.Unlock()
+			case <-b:
+				timer.Stop()
+			case <-ac.ctx.Done():
+				timer.Stop()
+				return
+			}
 			continue
 		}
-		if ac.dopts.waitForHandshake {
-			select {
-			case <-done:
-			case <-connectCtx.Done():
-				// Didn't receive server preface, must kill this new transport now.
-				grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
-				newTr.Close()
-				break
-			case <-ac.ctx.Done():
-			}
-		}
+
 		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			// ac.tearDonn(...) has been invoked.
 			newTr.Close()
-			return false, errConnClosing
-		}
-		ac.printf("ready")
-		ac.state = connectivity.Ready
-		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-		ac.transport = newTr
-		ac.curAddr = addr
-		if ac.ready != nil {
-			close(ac.ready)
-			ac.ready = nil
-		}
-		select {
-		case <-done:
-			// If the server has responded back with preface already,
-			// don't set the reconnect parameters.
-		default:
-			ac.connectRetryNum = connectRetryNum
-			ac.backoffDeadline = backoffDeadline
-			ac.connectDeadline = connectDeadline
-			ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
-		}
-		ac.mu.Unlock()
-		return true, nil
-	}
-	ac.mu.Lock()
-	if ac.state == connectivity.Shutdown {
-		ac.mu.Unlock()
-		return false, errConnClosing
-	}
-	ac.state = connectivity.TransientFailure
-	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-	ac.cc.resolveNow(resolver.ResolveNowOption{})
-	if ac.ready != nil {
-		close(ac.ready)
-		ac.ready = nil
-	}
-	ac.mu.Unlock()
-	timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
-	select {
-	case <-timer.C:
-	case <-ac.ctx.Done():
-		timer.Stop()
-		return false, ac.ctx.Err()
-	}
-	return false, nil
-}
-
-// Run in a goroutine to track the error in transport and create the
-// new transport if an error happens. It returns when the channel is closing.
-func (ac *addrConn) transportMonitor() {
-	for {
-		var timer *time.Timer
-		var cdeadline <-chan time.Time
-		ac.mu.Lock()
-		t := ac.transport
-		if !ac.connectDeadline.IsZero() {
-			timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
-			cdeadline = timer.C
-		}
-		ac.mu.Unlock()
-		// Block until we receive a goaway or an error occurs.
-		select {
-		case <-t.GoAway():
-			done := t.Error()
-			cleanup := t.Close
-			// Since this transport will be orphaned (won't have a transportMonitor)
-			// we need to launch a goroutine to keep track of clientConn.Close()
-			// happening since it might not be noticed by any other goroutine for a while.
-			go func() {
-				<-done
-				cleanup()
-			}()
-		case <-t.Error():
-			// In case this is triggered because clientConn.Close()
-			// was called, we want to immeditately close the transport
-			// since no other goroutine might notice it for a while.
-			t.Close()
-		case <-cdeadline:
-			ac.mu.Lock()
-			// This implies that client received server preface.
-			if ac.backoffDeadline.IsZero() {
-				ac.mu.Unlock()
-				continue
-			}
 			ac.mu.Unlock()
-			timer = nil
-			// No server preface received until deadline.
-			// Kill the connection.
-			grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
-			t.Close()
+			return
 		}
-		if timer != nil {
-			timer.Stop()
+		ac.curAddr = addr
+		ac.transport = newTr
+		ac.backoffIdx = 0
+
+		healthCheckConfig := ac.cc.healthCheckConfig()
+		// LB channel health checking is only enabled when all four of the requirements below are met:
+		// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption,
+		// 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package,
+		// 3. a service config with non-empty healthCheckConfig field is provided,
+		// 4. the current load balancer allows it.
+		hctx, hcancel := context.WithCancel(ac.ctx)
+		healthcheckManagingState := false
+		if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled {
+			if ac.cc.dopts.healthCheckFunc == nil {
+				// TODO: add a link to the health check doc in the error message.
+				grpclog.Error("the client side LB channel health check function has not been set.")
+			} else {
+				// TODO(deklerk) refactor to just return transport
+				go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName)
+				healthcheckManagingState = true
+			}
 		}
-		// If a GoAway happened, regardless of error, adjust our keepalive
-		// parameters as appropriate.
-		select {
-		case <-t.GoAway():
-			ac.adjustParams(t.GetGoAwayReason())
-		default:
+		if !healthcheckManagingState {
+			ac.updateConnectivityState(connectivity.Ready)
 		}
+		ac.mu.Unlock()
+
+		// Block until the created transport is down. And when this happens,
+		// we restart from the top of the addr list.
+		<-reconnect.Done()
+		hcancel()
+
+		// Need to reconnect after a READY, the addrConn enters
+		// TRANSIENT_FAILURE.
+		//
+		// This will set addrConn to TRANSIENT_FAILURE for a very short period
+		// of time, and then switch to CONNECTING. It seems reasonable to skip this, but
+		// READY-CONNECTING is not a valid transition.
 		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
 			ac.mu.Unlock()
 			return
 		}
-		// Set connectivity state to TransientFailure before calling
-		// resetTransport. Transition READY->CONNECTING is not valid.
-		ac.state = connectivity.TransientFailure
-		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-		ac.cc.resolveNow(resolver.ResolveNowOption{})
-		ac.curAddr = resolver.Address{}
+		ac.updateConnectivityState(connectivity.TransientFailure)
 		ac.mu.Unlock()
-		if err := ac.resetTransport(); err != nil {
-			ac.mu.Lock()
-			ac.printf("transport exiting: %v", err)
+	}
+}
+
+// tryAllAddrs tries to create a connection to the addresses, and stops at the
+// first successful one. It returns the transport, the address, and an Event in
+// the successful case. The Event fires when the returned transport disconnects.
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+	for _, addr := range addrs {
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
 			ac.mu.Unlock()
-			grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err)
-			if err != errConnClosing {
-				// Keep this ac in cc.conns, to get the reason it's torn down.
-				ac.tearDown(err)
-			}
+			return nil, resolver.Address{}, nil, errConnClosing
+		}
+		ac.updateConnectivityState(connectivity.Connecting)
+		ac.transport = nil
+
+		ac.cc.mu.RLock()
+		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+		ac.cc.mu.RUnlock()
+
+		copts := ac.dopts.copts
+		if ac.scopts.CredsBundle != nil {
+			copts.CredsBundle = ac.scopts.CredsBundle
+		}
+		ac.mu.Unlock()
+
+		if channelz.IsOn() {
+			channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
+				Severity: channelz.CtINFO,
+			})
+		}
+
+		newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
+		if err == nil {
+			return newTr, addr, reconnect, nil
+		}
+		ac.cc.blockingpicker.updateConnectionError(err)
+	}
+
+	// Couldn't connect to any address.
+	return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
+}
+
+// createTransport creates a connection to addr. It returns the transport and an
+// Event in the successful case. The Event fires when the returned transport
+// disconnects.
+func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) {
+	prefaceReceived := make(chan struct{})
+	onCloseCalled := make(chan struct{})
+	reconnect := grpcsync.NewEvent()
+
+	target := transport.TargetInfo{
+		Addr:      addr.Addr,
+		Metadata:  addr.Metadata,
+		Authority: ac.cc.authority,
+	}
+
+	onGoAway := func(r transport.GoAwayReason) {
+		ac.mu.Lock()
+		ac.adjustParams(r)
+		ac.mu.Unlock()
+		reconnect.Fire()
+	}
+
+	onClose := func() {
+		close(onCloseCalled)
+		reconnect.Fire()
+	}
+
+	onPrefaceReceipt := func() {
+		close(prefaceReceived)
+	}
+
+	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+	defer cancel()
+	if channelz.IsOn() {
+		copts.ChannelzParentID = ac.channelzID
+	}
+
+	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
+	if err != nil {
+		// newTr is either nil, or closed.
+		grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
+		return nil, nil, err
+	}
+
+	if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn {
+		select {
+		case <-time.After(connectDeadline.Sub(time.Now())):
+			// We didn't get the preface in time.
+			newTr.Close()
+			grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
+			return nil, nil, errors.New("timed out waiting for server handshake")
+		case <-prefaceReceived:
+			// We got the preface - huzzah! things are good.
+		case <-onCloseCalled:
+			// The transport has already closed - noop.
+			return nil, nil, errors.New("connection closed")
+			// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
+		}
+	}
+	return newTr, reconnect, nil
+}
+
+func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) {
+	// Set up the health check helper functions
+	newStream := func() (interface{}, error) {
+		return ac.newClientStream(ctx, &StreamDesc{ServerStreams: true}, "/grpc.health.v1.Health/Watch", newTr)
+	}
+	firstReady := true
+	reportHealth := func(ok bool) {
+		ac.mu.Lock()
+		defer ac.mu.Unlock()
+		if ac.transport != newTr {
 			return
 		}
+		if ok {
+			if firstReady {
+				firstReady = false
+				ac.curAddr = addr
+			}
+			ac.updateConnectivityState(connectivity.Ready)
+		} else {
+			ac.updateConnectivityState(connectivity.TransientFailure)
+		}
+	}
+	err := ac.cc.dopts.healthCheckFunc(ctx, newStream, reportHealth, serviceName)
+	if err != nil {
+		if status.Code(err) == codes.Unimplemented {
+			if channelz.IsOn() {
+				channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+					Desc:     "Subchannel health check is unimplemented at server side, thus health check is disabled",
+					Severity: channelz.CtError,
+				})
+			}
+			grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
+		} else {
+			grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
+		}
 	}
 }
 
-// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
-// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true.
-func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
-	for {
-		ac.mu.Lock()
-		switch {
-		case ac.state == connectivity.Shutdown:
-			if failfast || !hasBalancer {
-				// RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
-				err := ac.tearDownErr
-				ac.mu.Unlock()
-				return nil, err
-			}
-			ac.mu.Unlock()
-			return nil, errConnClosing
-		case ac.state == connectivity.Ready:
-			ct := ac.transport
-			ac.mu.Unlock()
-			return ct, nil
-		case ac.state == connectivity.TransientFailure:
-			if failfast || hasBalancer {
-				ac.mu.Unlock()
-				return nil, errConnUnavailable
-			}
-		}
-		ready := ac.ready
-		if ready == nil {
-			ready = make(chan struct{})
-			ac.ready = ready
-		}
-		ac.mu.Unlock()
-		select {
-		case <-ctx.Done():
-			return nil, toRPCErr(ctx.Err())
-		// Wait until the new transport is ready or failed.
-		case <-ready:
-		}
-	}
+func (ac *addrConn) resetConnectBackoff() {
+	ac.mu.Lock()
+	close(ac.resetBackoff)
+	ac.backoffIdx = 0
+	ac.resetBackoff = make(chan struct{})
+	ac.mu.Unlock()
 }
 
 // getReadyTransport returns the transport if ac's state is READY.
@@ -1481,7 +1209,7 @@
 // If ac's state is IDLE, it will trigger ac to connect.
 func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
 	ac.mu.Lock()
-	if ac.state == connectivity.Ready {
+	if ac.state == connectivity.Ready && ac.transport != nil {
 		t := ac.transport
 		ac.mu.Unlock()
 		return t, true
@@ -1504,34 +1232,42 @@
 // tight loop.
 // tearDown doesn't remove ac from ac.cc.conns.
 func (ac *addrConn) tearDown(err error) {
-	ac.cancel()
 	ac.mu.Lock()
-	defer ac.mu.Unlock()
 	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
 		return
 	}
+	curTr := ac.transport
+	ac.transport = nil
+	// We have to set the state to Shutdown before anything else to prevent races
+	// between setting the state and logic that waits on context cancelation / etc.
+	ac.updateConnectivityState(connectivity.Shutdown)
+	ac.cancel()
 	ac.curAddr = resolver.Address{}
-	if err == errConnDrain && ac.transport != nil {
+	if err == errConnDrain && curTr != nil {
 		// GracefulClose(...) may be executed multiple times when
 		// i) receiving multiple GoAway frames from the server; or
 		// ii) there are concurrent name resolver/Balancer triggered
 		// address removal and GoAway.
-		ac.transport.GracefulClose()
-	}
-	ac.state = connectivity.Shutdown
-	ac.tearDownErr = err
-	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
-	if ac.events != nil {
-		ac.events.Finish()
-		ac.events = nil
-	}
-	if ac.ready != nil {
-		close(ac.ready)
-		ac.ready = nil
+		// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
+		ac.mu.Unlock()
+		curTr.GracefulClose()
+		ac.mu.Lock()
 	}
 	if channelz.IsOn() {
+		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+			Desc:     "Subchannel Deleted",
+			Severity: channelz.CtINFO,
+			Parent: &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID),
+				Severity: channelz.CtINFO,
+			},
+		})
+		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
+		// the entity being deleted, and thus prevent it from being deleted right away.
 		channelz.RemoveEntry(ac.channelzID)
 	}
+	ac.mu.Unlock()
 }
 
 func (ac *addrConn) getState() connectivity.State {
@@ -1540,47 +1276,76 @@
 	return ac.state
 }
 
-func (ac *addrConn) getCurAddr() (ret resolver.Address) {
-	ac.mu.Lock()
-	ret = ac.curAddr
-	ac.mu.Unlock()
-	return
-}
-
 func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
 	ac.mu.Lock()
 	addr := ac.curAddr.Addr
 	ac.mu.Unlock()
-	state := ac.getState()
-	ac.czmu.RLock()
-	defer ac.czmu.RUnlock()
 	return &channelz.ChannelInternalMetric{
-		State:                    state,
+		State:                    ac.getState(),
 		Target:                   addr,
-		CallsStarted:             ac.callsStarted,
-		CallsSucceeded:           ac.callsSucceeded,
-		CallsFailed:              ac.callsFailed,
-		LastCallStartedTimestamp: ac.lastCallStartedTime,
+		CallsStarted:             atomic.LoadInt64(&ac.czData.callsStarted),
+		CallsSucceeded:           atomic.LoadInt64(&ac.czData.callsSucceeded),
+		CallsFailed:              atomic.LoadInt64(&ac.czData.callsFailed),
+		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
 	}
 }
 
 func (ac *addrConn) incrCallsStarted() {
-	ac.czmu.Lock()
-	ac.callsStarted++
-	ac.lastCallStartedTime = time.Now()
-	ac.czmu.Unlock()
+	atomic.AddInt64(&ac.czData.callsStarted, 1)
+	atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
 }
 
 func (ac *addrConn) incrCallsSucceeded() {
-	ac.czmu.Lock()
-	ac.callsSucceeded++
-	ac.czmu.Unlock()
+	atomic.AddInt64(&ac.czData.callsSucceeded, 1)
 }
 
 func (ac *addrConn) incrCallsFailed() {
-	ac.czmu.Lock()
-	ac.callsFailed++
-	ac.czmu.Unlock()
+	atomic.AddInt64(&ac.czData.callsFailed, 1)
+}
+
+type retryThrottler struct {
+	max    float64
+	thresh float64
+	ratio  float64
+
+	mu     sync.Mutex
+	tokens float64 // TODO(dfawley): replace with atomic and remove lock.
+}
+
+// throttle subtracts a retry token from the pool and returns whether a retry
+// should be throttled (disallowed) based upon the retry throttling policy in
+// the service config.
+func (rt *retryThrottler) throttle() bool {
+	if rt == nil {
+		return false
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens--
+	if rt.tokens < 0 {
+		rt.tokens = 0
+	}
+	return rt.tokens <= rt.thresh
+}
+
+func (rt *retryThrottler) successfulRPC() {
+	if rt == nil {
+		return
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens += rt.ratio
+	if rt.tokens > rt.max {
+		rt.tokens = rt.max
+	}
+}
+
+type channelzChannel struct {
+	cc *ClientConn
+}
+
+func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
+	return c.cc.channelzMetric()
 }
 
 // ErrClientConnTimeout indicates that the ClientConn cannot establish the
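
For reference, the sketch below mirrors the token-bucket arithmetic of the retryThrottler introduced above (locking omitted). It re-implements the unexported type outside the grpc package purely for illustration and is not part of this patch; the numbers assume a service config with maxTokens of 10 and a tokenRatio of 0.5, so the threshold becomes 5 as in applyServiceConfig.

package main

import "fmt"

type throttler struct {
	max, thresh, ratio, tokens float64
}

// throttle spends one token per retry attempt and reports whether the retry
// must be disallowed (pool at or below the threshold).
func (t *throttler) throttle() bool {
	t.tokens--
	if t.tokens < 0 {
		t.tokens = 0
	}
	return t.tokens <= t.thresh
}

// success refills the pool by ratio for every successful RPC, capped at max.
func (t *throttler) success() {
	t.tokens += t.ratio
	if t.tokens > t.max {
		t.tokens = t.max
	}
}

func main() {
	// Mirrors applyServiceConfig: tokens start at maxTokens, threshold is maxTokens/2.
	t := &throttler{max: 10, thresh: 5, ratio: 0.5, tokens: 10}
	for i := 1; i <= 6; i++ {
		fmt.Printf("retry %d throttled=%v tokens=%.1f\n", i, t.throttle(), t.tokens)
	}
	// After five drained tokens the pool hits the threshold and retries are
	// throttled until successful RPCs refill it.
	t.success()
	fmt.Printf("after one success tokens=%.1f\n", t.tokens)
}
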
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index a8280ae..d9b9d57 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -22,6 +22,7 @@
 
 import (
 	"fmt"
+	"strconv"
 )
 
 // A Code is an unsigned 32-bit error code as defined in the gRPC spec.
@@ -143,6 +144,8 @@
 	// Unauthenticated indicates the request does not have valid
 	// authentication credentials for the operation.
 	Unauthenticated Code = 16
+
+	_maxCode = 17
 )
 
 var strToCode = map[string]Code{
@@ -176,6 +179,16 @@
 	if c == nil {
 		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
 	}
+
+	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+		if ci >= _maxCode {
+			return fmt.Errorf("invalid code: %q", ci)
+		}
+
+		*c = Code(ci)
+		return nil
+	}
+
 	if jc, ok := strToCode[string(b)]; ok {
 		*c = jc
 		return nil
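
The codes.go hunk above makes Code accept bare JSON numbers (rejecting values at or beyond _maxCode) in addition to the quoted names it already understood. A minimal sketch of the resulting behavior, using only the public codes package:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var c codes.Code

	// Bare numbers within range are now accepted.
	if err := json.Unmarshal([]byte(`14`), &c); err == nil {
		fmt.Println(c) // Unavailable
	}

	// Numbers at or above _maxCode are rejected.
	fmt.Println(json.Unmarshal([]byte(`17`), &c) != nil) // true

	// Quoted names keep working as before.
	if err := json.Unmarshal([]byte(`"NOT_FOUND"`), &c); err == nil {
		fmt.Println(c) // NotFound
	}
}
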
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
index 568ef5d..34ec36f 100644
--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -22,7 +22,8 @@
 package connectivity
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc/grpclog"
 )
 
@@ -51,7 +52,7 @@
 const (
 	// Idle indicates the ClientConn is idle.
 	Idle State = iota
-	// Connecting indicates the ClienConn is connecting.
+	// Connecting indicates the ClientConn is connecting.
 	Connecting
 	// Ready indicates the ClientConn is ready for work.
 	Ready
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 3351bf0..88aff94 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -23,6 +23,7 @@
 package credentials // import "google.golang.org/grpc/credentials"
 
 import (
+	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"errors"
@@ -31,12 +32,10 @@
 	"net"
 	"strings"
 
-	"golang.org/x/net/context"
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/credentials/internal"
 )
 
-// alpnProtoStr are the specified application level protocols for gRPC.
-var alpnProtoStr = []string{"h2"}
-
 // PerRPCCredentials defines the common interface for the credentials which need to
 // attach security information to every RPC (e.g., oauth2).
 type PerRPCCredentials interface {
@@ -107,6 +106,25 @@
 	OverrideServerName(string) error
 }
 
+// Bundle is a combination of TransportCredentials and PerRPCCredentials.
+//
+// It also contains a mode switching method, so it can be used as a combination
+// of different credential policies.
+//
+// Bundle cannot be used together with individual TransportCredentials.
+// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
+//
+// This API is experimental.
+type Bundle interface {
+	TransportCredentials() TransportCredentials
+	PerRPCCredentials() PerRPCCredentials
+	// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
+	// existing Bundle may cause races.
+	//
+	// NewWithMode returns nil if the requested mode is not supported.
+	NewWithMode(mode string) (Bundle, error)
+}
+
 // TLSInfo contains the auth information for a TLS authenticated connection.
 // It implements the AuthInfo interface.
 type TLSInfo struct {
@@ -118,6 +136,18 @@
 	return "tls"
 }
 
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+	v := &TLSChannelzSecurityValue{
+		StandardName: cipherSuiteLookup[t.State.CipherSuite],
+	}
+	// Currently there's no way to get LocalCertificate info from tls package.
+	if len(t.State.PeerCertificates) > 0 {
+		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+	}
+	return v
+}
+
 // tlsCreds is the credentials required for authenticating a connection using TLS.
 type tlsCreds struct {
 	// TLS configuration
@@ -155,7 +185,7 @@
 	case <-ctx.Done():
 		return nil, nil, ctx.Err()
 	}
-	return conn, TLSInfo{conn.ConnectionState()}, nil
+	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
 }
 
 func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@@ -163,7 +193,7 @@
 	if err := conn.Handshake(); err != nil {
 		return nil, nil, err
 	}
-	return conn, TLSInfo{conn.ConnectionState()}, nil
+	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
 }
 
 func (c *tlsCreds) Clone() TransportCredentials {
@@ -175,10 +205,23 @@
 	return nil
 }
 
+const alpnProtoStrH2 = "h2"
+
+func appendH2ToNextProtos(ps []string) []string {
+	for _, p := range ps {
+		if p == alpnProtoStrH2 {
+			return ps
+		}
+	}
+	ret := make([]string, 0, len(ps)+1)
+	ret = append(ret, ps...)
+	return append(ret, alpnProtoStrH2)
+}
+
 // NewTLS uses c to construct a TransportCredentials based on TLS.
 func NewTLS(c *tls.Config) TransportCredentials {
 	tc := &tlsCreds{cloneTLSConfig(c)}
-	tc.config.NextProtos = alpnProtoStr
+	tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
 	return tc
 }
 
@@ -218,3 +261,78 @@
 	}
 	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
 }
+
+// ChannelzSecurityInfo defines the interface that security protocols should implement
+// in order to provide security info to channelz.
+type ChannelzSecurityInfo interface {
+	GetSecurityValue() ChannelzSecurityValue
+}
+
+// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
+// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
+// and *OtherChannelzSecurityValue.
+type ChannelzSecurityValue interface {
+	isChannelzSecurityValue()
+}
+
+// TLSChannelzSecurityValue defines the struct that TLS protocol should return
+// from GetSecurityValue(), containing security info like cipher and certificate used.
+type TLSChannelzSecurityValue struct {
+	StandardName      string
+	LocalCertificate  []byte
+	RemoteCertificate []byte
+}
+
+func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
+
+// OtherChannelzSecurityValue defines the struct that a non-TLS protocol should return
+// from GetSecurityValue(), which contains protocol-specific security info. Note that
+// the Value field will be sent to users of channelz requesting channel info, and
+// thus sensitive info should be avoided.
+type OtherChannelzSecurityValue struct {
+	Name  string
+	Value proto.Message
+}
+
+func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
+
+var cipherSuiteLookup = map[uint16]string{
+	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
+	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
+	tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV",
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA256:         "TLS_RSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+}
+
+// cloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+
+	return cfg.Clone()
+}
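
With this change NewTLS no longer overwrites a caller's ALPN list: appendH2ToNextProtos adds "h2" only when it is missing. The helper is unexported, so the sketch below simply mirrors its logic to show the effect; it is illustrative and not part of this patch.

package main

import "fmt"

// appendH2 mirrors appendH2ToNextProtos: append "h2" only if the caller did
// not already advertise it, returning the original slice otherwise.
func appendH2(ps []string) []string {
	for _, p := range ps {
		if p == "h2" {
			return ps
		}
	}
	ret := make([]string, 0, len(ps)+1)
	ret = append(ret, ps...)
	return append(ret, "h2")
}

func main() {
	fmt.Println(appendH2(nil))                        // [h2]
	fmt.Println(appendH2([]string{"acme-tls/1"}))     // [acme-tls/1 h2]
	fmt.Println(appendH2([]string{"h2", "http/1.1"})) // [h2 http/1.1] (unchanged)
}
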
diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
deleted file mode 100644
index 60409aa..0000000
--- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// +build go1.7
-// +build !go1.8
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
-	"crypto/tls"
-)
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		return &tls.Config{}
-	}
-	return &tls.Config{
-		Rand:                        cfg.Rand,
-		Time:                        cfg.Time,
-		Certificates:                cfg.Certificates,
-		NameToCertificate:           cfg.NameToCertificate,
-		GetCertificate:              cfg.GetCertificate,
-		RootCAs:                     cfg.RootCAs,
-		NextProtos:                  cfg.NextProtos,
-		ServerName:                  cfg.ServerName,
-		ClientAuth:                  cfg.ClientAuth,
-		ClientCAs:                   cfg.ClientCAs,
-		InsecureSkipVerify:          cfg.InsecureSkipVerify,
-		CipherSuites:                cfg.CipherSuites,
-		PreferServerCipherSuites:    cfg.PreferServerCipherSuites,
-		SessionTicketsDisabled:      cfg.SessionTicketsDisabled,
-		SessionTicketKey:            cfg.SessionTicketKey,
-		ClientSessionCache:          cfg.ClientSessionCache,
-		MinVersion:                  cfg.MinVersion,
-		MaxVersion:                  cfg.MaxVersion,
-		CurvePreferences:            cfg.CurvePreferences,
-		DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
-		Renegotiation:               cfg.Renegotiation,
-	}
-}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
deleted file mode 100644
index 93f0e1d..0000000
--- a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
-	"crypto/tls"
-)
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		return &tls.Config{}
-	}
-
-	return cfg.Clone()
-}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
deleted file mode 100644
index d6bbcc9..0000000
--- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// +build !go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
-	"crypto/tls"
-)
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		return &tls.Config{}
-	}
-	return &tls.Config{
-		Rand:                     cfg.Rand,
-		Time:                     cfg.Time,
-		Certificates:             cfg.Certificates,
-		NameToCertificate:        cfg.NameToCertificate,
-		GetCertificate:           cfg.GetCertificate,
-		RootCAs:                  cfg.RootCAs,
-		NextProtos:               cfg.NextProtos,
-		ServerName:               cfg.ServerName,
-		ClientAuth:               cfg.ClientAuth,
-		ClientCAs:                cfg.ClientCAs,
-		InsecureSkipVerify:       cfg.InsecureSkipVerify,
-		CipherSuites:             cfg.CipherSuites,
-		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
-		SessionTicketsDisabled:   cfg.SessionTicketsDisabled,
-		SessionTicketKey:         cfg.SessionTicketKey,
-		ClientSessionCache:       cfg.ClientSessionCache,
-		MinVersion:               cfg.MinVersion,
-		MaxVersion:               cfg.MaxVersion,
-		CurvePreferences:         cfg.CurvePreferences,
-	}
-}
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
new file mode 100644
index 0000000..2f4472b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
@@ -0,0 +1,61 @@
+// +build !appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains credentials-internal code.
+package internal
+
+import (
+	"net"
+	"syscall"
+)
+
+type sysConn = syscall.Conn
+
+// syscallConn keeps a reference to rawConn to support syscall.Conn for channelz.
+// SyscallConn() (the method in interface syscall.Conn) is explicitly
+// implemented on this type.
+//
+// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
+// TCPConn, UnixConn), but is not part of the net.Conn interface. So wrapper conns
+// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
+// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
+// help here).
+type syscallConn struct {
+	net.Conn
+	// sysConn is a type alias of syscall.Conn. It's necessary because the name
+	// `Conn` collides with `net.Conn`.
+	sysConn
+}
+
+// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
+// implements syscall.Conn. rawConn will be used to support syscall, and newConn
+// will be used for read/write.
+//
+// This function returns newConn if rawConn doesn't implement syscall.Conn.
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
+	sysConn, ok := rawConn.(syscall.Conn)
+	if !ok {
+		return newConn
+	}
+	return &syscallConn{
+		Conn:    newConn,
+		sysConn: sysConn,
+	}
+}
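
The new internal package restores the syscall.Conn access that is lost when a raw TCP connection is wrapped in a tls.Conn, which channelz needs in order to read socket details. WrapSyscallConn itself lives in an internal package and cannot be imported from outside grpc, so the self-contained sketch below re-implements the same wrapping idea for illustration:

package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"syscall"
)

// sysConn aliases syscall.Conn so it can be embedded next to net.Conn without
// a field-name clash, exactly as the vendored helper does.
type sysConn = syscall.Conn

type wrappedConn struct {
	net.Conn // reads and writes go through the outer conn (e.g. a *tls.Conn)
	sysConn  // SyscallConn() is served by the raw TCP connection
}

// wrap mirrors WrapSyscallConn: fall back to newConn when rawConn cannot
// provide syscall access.
func wrap(rawConn, newConn net.Conn) net.Conn {
	sc, ok := rawConn.(syscall.Conn)
	if !ok {
		return newConn
	}
	return &wrappedConn{Conn: newConn, sysConn: sc}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	go func() {
		if c, _ := ln.Accept(); c != nil {
			c.Close()
		}
	}()

	raw, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	tlsConn := tls.Client(raw, &tls.Config{InsecureSkipVerify: true})
	defer tlsConn.Close()

	_, plainOK := net.Conn(tlsConn).(syscall.Conn) // false: tls.Conn hides the socket
	_, wrapOK := wrap(raw, tlsConn).(syscall.Conn) // true: the wrapper restores access
	fmt.Println(plainOK, wrapOK)
}
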
diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
similarity index 72%
copy from vendor/google.golang.org/grpc/naming/go18.go
copy to vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
index b5a0f84..d4346e9 100644
--- a/vendor/google.golang.org/grpc/naming/go18.go
+++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
@@ -1,8 +1,8 @@
-// +build go1.8
+// +build appengine
 
 /*
  *
- * Copyright 2017 gRPC authors.
+ * Copyright 2018 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,11 +18,13 @@
  *
  */
 
-package naming
+package internal
 
-import "net"
-
-var (
-	lookupHost = net.DefaultResolver.LookupHost
-	lookupSRV  = net.DefaultResolver.LookupSRV
+import (
+	"net"
 )
+
+// WrapSyscallConn returns newConn on appengine.
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
+	return newConn
+}
diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
index f6d597a..e0e74d8 100644
--- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
+++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
@@ -20,11 +20,11 @@
 package oauth
 
 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"sync"
 
-	"golang.org/x/net/context"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 	"golang.org/x/oauth2/jwt"
diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/tls13.go
new file mode 100644
index 0000000..ccbf35b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls13.go
@@ -0,0 +1,30 @@
+// +build go1.12
+
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import "crypto/tls"
+
+// This init function adds cipher suite constants only defined in Go 1.12.
+func init() {
+	cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
+	cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
+	cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
+}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
new file mode 100644
index 0000000..e114fec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -0,0 +1,532 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/envconfig"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+)
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+	unaryInt    UnaryClientInterceptor
+	streamInt   StreamClientInterceptor
+	cp          Compressor
+	dc          Decompressor
+	bs          backoff.Strategy
+	block       bool
+	insecure    bool
+	timeout     time.Duration
+	scChan      <-chan ServiceConfig
+	authority   string
+	copts       transport.ConnectOptions
+	callOptions []CallOption
+	// This is used by v1 balancer dial option WithBalancer to support v1
+	// balancer, and also by WithBalancerName dial option.
+	balancerBuilder balancer.Builder
+	// This is to support grpclb.
+	resolverBuilder             resolver.Builder
+	reqHandshake                envconfig.RequireHandshakeSetting
+	channelzParentID            int64
+	disableServiceConfig        bool
+	disableRetry                bool
+	disableHealthCheck          bool
+	healthCheckFunc             internal.HealthChecker
+	minConnectTimeout           func() time.Duration
+	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
+	defaultServiceConfigRawJSON *string
+}
+
+// DialOption configures how we set up the connection.
+type DialOption interface {
+	apply(*dialOptions)
+}
+
+// EmptyDialOption does not alter the dial configuration. It can be embedded in
+// another structure to build custom dial options.
+//
+// This API is EXPERIMENTAL.
+type EmptyDialOption struct{}
+
+func (EmptyDialOption) apply(*dialOptions) {}
+
+// funcDialOption wraps a function that modifies dialOptions into an
+// implementation of the DialOption interface.
+type funcDialOption struct {
+	f func(*dialOptions)
+}
+
+func (fdo *funcDialOption) apply(do *dialOptions) {
+	fdo.f(do)
+}
+
+func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
+	return &funcDialOption{
+		f: f,
+	}
+}
+
+// WithWaitForHandshake blocks until the initial settings frame is received from
+// the server before assigning RPCs to the connection.
+//
+// Deprecated: this is the default behavior, and this option will be removed
+// after the 1.18 release.
+func WithWaitForHandshake() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.reqHandshake = envconfig.RequireHandshakeOn
+	})
+}
+
+// WithWriteBufferSize determines how much data can be batched before doing a
+// write on the wire. The corresponding memory allocation for this buffer will
+// be twice the size to keep syscalls low. The default value for this buffer is
+// 32KB.
+//
+// Zero will disable the write buffer such that each write will be on the
+// underlying connection. Note: A Send call may not directly translate to a write.
+func WithWriteBufferSize(s int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.WriteBufferSize = s
+	})
+}
+
+// WithReadBufferSize lets you set the size of the read buffer; this determines how
+// much data can be read at most for each read syscall.
+//
+// The default value for this buffer is 32KB. Zero will disable the read buffer for
+// a connection so the data framer can access the underlying conn directly.
+func WithReadBufferSize(s int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.ReadBufferSize = s
+	})
+}
+
+// WithInitialWindowSize returns a DialOption which sets the value for initial
+// window size on a stream. The lower bound for window size is 64K and any value
+// smaller than that will be ignored.
+func WithInitialWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialWindowSize = s
+	})
+}
+
+// WithInitialConnWindowSize returns a DialOption which sets the value for
+// initial window size on a connection. The lower bound for window size is 64K
+// and any value smaller than that will be ignored.
+func WithInitialConnWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialConnWindowSize = s
+	})
+}
+
+// WithMaxMsgSize returns a DialOption which sets the maximum message size the
+// client can receive.
+//
+// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
+func WithMaxMsgSize(s int) DialOption {
+	return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
+}
+
+// WithDefaultCallOptions returns a DialOption which sets the default
+// CallOptions for calls over the connection.
+func WithDefaultCallOptions(cos ...CallOption) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.callOptions = append(o.callOptions, cos...)
+	})
+}
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and
+// unmarshaling.
+//
+// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.
+func WithCodec(c Codec) DialOption {
+	return WithDefaultCallOptions(CallCustomCodec(c))
+}
+
+// WithCompressor returns a DialOption which sets a Compressor to use for
+// message compression. It has lower priority than the compressor set by the
+// UseCompressor CallOption.
+//
+// Deprecated: use UseCompressor instead.
+func WithCompressor(cp Compressor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.cp = cp
+	})
+}
+
+// WithDecompressor returns a DialOption which sets a Decompressor to use for
+// incoming message decompression.  If incoming response messages are encoded
+// using the decompressor's Type(), it will be used.  Otherwise, the message
+// encoding will be used to look up the compressor registered via
+// encoding.RegisterCompressor, which will then be used to decompress the
+// message.  If no compressor is registered for the encoding, an Unimplemented
+// status error will be returned.
+//
+// Deprecated: use encoding.RegisterCompressor instead.
+func WithDecompressor(dc Decompressor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.dc = dc
+	})
+}
+
+// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
+// Name resolver will be ignored if this DialOption is specified.
+//
+// Deprecated: use the new balancer APIs in balancer package and
+// WithBalancerName.
+func WithBalancer(b Balancer) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.balancerBuilder = &balancerWrapperBuilder{
+			b: b,
+		}
+	})
+}
+
+// WithBalancerName sets the balancer that the ClientConn will be initialized
+// with. The balancer registered with balancerName will be used. This function
+// panics if no balancer was registered under balancerName.
+//
+// The balancer cannot be overridden by balancer option specified by service
+// config.
+//
+// This is an EXPERIMENTAL API.
+func WithBalancerName(balancerName string) DialOption {
+	builder := balancer.Get(balancerName)
+	if builder == nil {
+		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
+	}
+	return newFuncDialOption(func(o *dialOptions) {
+		o.balancerBuilder = builder
+	})
+}
+
+// withResolverBuilder is only for grpclb.
+func withResolverBuilder(b resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolverBuilder = b
+	})
+}
+
+// WithServiceConfig returns a DialOption which sets a channel from which the
+// service configuration is read.
+//
+// Deprecated: the service config should be received through the name resolver,
+// as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+func WithServiceConfig(c <-chan ServiceConfig) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.scChan = c
+	})
+}
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
+//
+// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
+// for use.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+	return withBackoff(backoff.Exponential{
+		MaxDelay: b.MaxDelay,
+	})
+}
+
+// withBackoff sets the backoff strategy used for connectRetryNum after a failed
+// connection attempt.
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs backoff.Strategy) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = bs
+	})
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+	})
+}
+
+// WithInsecure returns a DialOption which disables transport security for this
+// ClientConn. Note that transport security is required unless WithInsecure is
+// set.
+func WithInsecure() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.insecure = true
+	})
+}
+
+// WithTransportCredentials returns a DialOption which configures connection-level
+// security credentials (e.g., TLS/SSL). This should not be used together
+// with WithCredentialsBundle.
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.TransportCredentials = creds
+	})
+}
+
+// WithPerRPCCredentials returns a DialOption which sets credentials and places
+// auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+	})
+}
+
+// WithCredentialsBundle returns a DialOption to set a credentials bundle for
+// the ClientConn. This should not be used together with
+// WithTransportCredentials.
+//
+// This API is experimental.
+func WithCredentialsBundle(b credentials.Bundle) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.CredsBundle = b
+	})
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a
+// ClientConn initially. This is valid if and only if WithBlock() is present.
+//
+// Deprecated: use DialContext and context.WithTimeout instead.
+func WithTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.timeout = d
+	})
+}
+
+// WithContextDialer returns a DialOption that sets a dialer to create
+// connections. If FailOnNonTempDialError() is set to true, and an error is
+// returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.Dialer = f
+	})
+}
+
+func init() {
+	internal.WithResolverBuilder = withResolverBuilder
+	internal.WithHealthCheckFunc = withHealthCheckFunc
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing
+// network addresses. If FailOnNonTempDialError() is set to true, and an error
+// is returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+//
+// Deprecated: use WithContextDialer instead.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+	return WithContextDialer(
+		func(ctx context.Context, addr string) (net.Conn, error) {
+			if deadline, ok := ctx.Deadline(); ok {
+				return f(addr, time.Until(deadline))
+			}
+			return f(addr, 0)
+		})
+}
+
+// WithStatsHandler returns a DialOption that specifies the stats handler for
+// all the RPCs and underlying network connections in this ClientConn.
+func WithStatsHandler(h stats.Handler) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.StatsHandler = h
+	})
+}
+
+// FailOnNonTempDialError returns a DialOption that specifies whether gRPC fails
+// on non-temporary dial errors. If f is true and the dialer returns a
+// non-temporary error, gRPC will fail the connection to the network address and
+// won't try to reconnect. The default value of FailOnNonTempDialError is false.
+//
+// FailOnNonTempDialError only affects the initial dial, and does not do
+// anything useful unless you are also using WithBlock().
+//
+// This is an EXPERIMENTAL API.
+func FailOnNonTempDialError(f bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.FailOnNonTempDialError = f
+	})
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all
+// the RPCs.
+func WithUserAgent(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.UserAgent = s
+	})
+}
+
+// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
+// for the client transport.
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
+	if kp.Time < internal.KeepaliveMinPingTime {
+		grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+		kp.Time = internal.KeepaliveMinPingTime
+	}
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.KeepaliveParams = kp
+	})
+}
+
+// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
+// unary RPCs.
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.unaryInt = f
+	})
+}
+
+// WithStreamInterceptor returns a DialOption that specifies the interceptor for
+// streaming RPCs.
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.streamInt = f
+	})
+}
+
+// WithAuthority returns a DialOption that specifies the value to be used as the
+// :authority pseudo-header. This value only works with WithInsecure and has no
+// effect if TransportCredentials are present.
+func WithAuthority(a string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.authority = a
+	})
+}
+
+// WithChannelzParentID returns a DialOption that specifies the channelz ID of
+// the current ClientConn's parent. This function is used in nested channel creation
+// (e.g. grpclb dial).
+func WithChannelzParentID(id int64) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.channelzParentID = id
+	})
+}
+
+// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
+// service config provided by the resolver and provides a hint to the resolver
+// to not fetch service configs.
+//
+// Note that this dial option only disables service config from the resolver. If
+// a default service config is provided, grpc will use the default service config.
+func WithDisableServiceConfig() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableServiceConfig = true
+	})
+}
+
+// WithDefaultServiceConfig returns a DialOption that configures the default
+// service config, which will be used in cases where:
+// 1. WithDisableServiceConfig is called.
+// 2. The resolver does not return a service config, or returns an invalid one.
+//
+// This API is EXPERIMENTAL.
+func WithDefaultServiceConfig(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.defaultServiceConfigRawJSON = &s
+	})
+}
+
+// WithDisableRetry returns a DialOption that disables retries, even if the
+// service config enables them.  This does not impact transparent retries, which
+// will happen automatically if no data is written to the wire or if the RPC is
+// unprocessed by the remote server.
+//
+// Retry support is currently disabled by default, but will be enabled by
+// default in the future.  Until then, it may be enabled by setting the
+// environment variable "GRPC_GO_RETRY" to "on".
+//
+// This API is EXPERIMENTAL.
+func WithDisableRetry() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableRetry = true
+	})
+}
+
+// WithMaxHeaderListSize returns a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+func WithMaxHeaderListSize(s uint32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.MaxHeaderListSize = &s
+	})
+}
+
+// WithDisableHealthCheck disables the LB channel health checking for all
+// SubConns of this ClientConn.
+//
+// This API is EXPERIMENTAL.
+func WithDisableHealthCheck() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableHealthCheck = true
+	})
+}
+
+// withHealthCheckFunc replaces the default health check function with the
+// provided one. It makes it easier for tests to change the health check function.
+//
+// For testing purpose only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.healthCheckFunc = f
+	})
+}
+
+func defaultDialOptions() dialOptions {
+	return dialOptions{
+		disableRetry:    !envconfig.Retry,
+		reqHandshake:    envconfig.RequireHandshake,
+		healthCheckFunc: internal.HealthCheckFunc,
+		copts: transport.ConnectOptions{
+			WriteBufferSize: defaultWriteBufSize,
+			ReadBufferSize:  defaultReadBufSize,
+		},
+	}
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purpose only.
+func withMinConnectDeadline(f func() time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.minConnectTimeout = f
+	})
+}
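As a rough illustration of how the DialOptions added above compose (this sketch is not part of the patch; the target address and parameter values are made up), a caller would typically pass several of them to grpc.DialContext:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Each With* helper returns a DialOption; DialContext applies them to the
	// dialOptions struct before establishing the connection.
	conn, err := grpc.DialContext(ctx, "example.local:50051", // hypothetical address
		grpc.WithInsecure(), // no transport security
		grpc.WithBlock(),    // block until the underlying connection is up
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16<<20)), // 16 MiB receive limit
		grpc.WithBackoffMaxDelay(5*time.Second),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second, // ping when idle for 30s
			Timeout: 10 * time.Second, // wait 10s for the ping ack
		}),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}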
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index ade8b7c..30a75da 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -102,10 +102,10 @@
 	if codec == nil {
 		panic("cannot register a nil Codec")
 	}
-	contentSubtype := strings.ToLower(codec.Name())
-	if contentSubtype == "" {
-		panic("cannot register Codec with empty string result for String()")
+	if codec.Name() == "" {
+		panic("cannot register Codec with empty string result for Name()")
 	}
+	contentSubtype := strings.ToLower(codec.Name())
 	registeredCodecs[contentSubtype] = codec
 }
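For context on the reordering above: RegisterCodec now validates that Name() is non-empty before lowercasing it into the content-subtype key. A minimal sketch of a custom Codec that satisfies this check (the JSON codec here is hypothetical and not part of the patch) could look like:

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// jsonCodec implements encoding.Codec. Name() must return a non-empty string,
// otherwise encoding.RegisterCodec panics as shown in the hunk above.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                               { return "json" }

func init() {
	// Registered under the lowercased content-subtype "json".
	encoding.RegisterCodec(jsonCodec{})
}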
 
diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go
deleted file mode 100644
index 535ee93..0000000
--- a/vendor/google.golang.org/grpc/go16.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
-	req.Cancel = ctx.Done()
-	if err := req.Write(conn); err != nil {
-		return fmt.Errorf("failed to write the HTTP request: %v", err)
-	}
-	return nil
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
-	if err == nil || err == io.EOF {
-		return err
-	}
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
-	switch e := err.(type) {
-	case transport.StreamError:
-		return status.Error(e.Code, e.Desc)
-	case transport.ConnectionError:
-		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		}
-	}
-	return status.Error(codes.Unknown, err.Error())
-}
diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go
deleted file mode 100644
index ec676a9..0000000
--- a/vendor/google.golang.org/grpc/go17.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-
-	netctx "golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{}).DialContext(ctx, network, address)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
-	req = req.WithContext(ctx)
-	if err := req.Write(conn); err != nil {
-		return fmt.Errorf("failed to write the HTTP request: %v", err)
-	}
-	return nil
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
-	if err == nil || err == io.EOF {
-		return err
-	}
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
-	switch e := err.(type) {
-	case transport.StreamError:
-		return status.Error(e.Code, e.Desc)
-	case transport.ConnectionError:
-		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded, netctx.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled, netctx.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		}
-	}
-	return status.Error(codes.Unknown, err.Error())
-}
diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go
deleted file mode 100644
index bc2b445..0000000
--- a/vendor/google.golang.org/grpc/grpclb.go
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-)
-
-const (
-	lbTokeyKey             = "lb-token"
-	defaultFallbackTimeout = 10 * time.Second
-	grpclbName             = "grpclb"
-)
-
-func convertDuration(d *lbpb.Duration) time.Duration {
-	if d == nil {
-		return 0
-	}
-	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
-}
-
-// Client API for LoadBalancer service.
-// Mostly copied from generated pb.go file.
-// To avoid circular dependency.
-type loadBalancerClient struct {
-	cc *ClientConn
-}
-
-func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
-	desc := &StreamDesc{
-		StreamName:    "BalanceLoad",
-		ServerStreams: true,
-		ClientStreams: true,
-	}
-	stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &balanceLoadClientStream{stream}
-	return x, nil
-}
-
-type balanceLoadClientStream struct {
-	ClientStream
-}
-
-func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
-	return x.ClientStream.SendMsg(m)
-}
-
-func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
-	m := new(lbpb.LoadBalanceResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func init() {
-	balancer.Register(newLBBuilder())
-}
-
-// newLBBuilder creates a builder for grpclb.
-func newLBBuilder() balancer.Builder {
-	return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
-}
-
-// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
-// fallbackTimeout. If no response is received from the remote balancer within
-// fallbackTimeout, the backend addresses from the resolved address list will be
-// used.
-//
-// Only call this function when a non-default fallback timeout is needed.
-func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
-	return &lbBuilder{
-		fallbackTimeout: fallbackTimeout,
-	}
-}
-
-type lbBuilder struct {
-	fallbackTimeout time.Duration
-}
-
-func (b *lbBuilder) Name() string {
-	return grpclbName
-}
-
-func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	// This generates a manual resolver builder with a random scheme. This
-	// scheme will be used to dial to remote LB, so we can send filtered address
-	// updates to remote LB ClientConn using this manual resolver.
-	scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
-	r := &lbManualResolver{scheme: scheme, ccb: cc}
-
-	var target string
-	targetSplitted := strings.Split(cc.Target(), ":///")
-	if len(targetSplitted) < 2 {
-		target = cc.Target()
-	} else {
-		target = targetSplitted[1]
-	}
-
-	lb := &lbBalancer{
-		cc:              newLBCacheClientConn(cc),
-		target:          target,
-		opt:             opt,
-		fallbackTimeout: b.fallbackTimeout,
-		doneCh:          make(chan struct{}),
-
-		manualResolver: r,
-		csEvltr:        &connectivityStateEvaluator{},
-		subConns:       make(map[resolver.Address]balancer.SubConn),
-		scStates:       make(map[balancer.SubConn]connectivity.State),
-		picker:         &errPicker{err: balancer.ErrNoSubConnAvailable},
-		clientStats:    &rpcStats{},
-	}
-
-	return lb
-}
-
-type lbBalancer struct {
-	cc              *lbCacheClientConn
-	target          string
-	opt             balancer.BuildOptions
-	fallbackTimeout time.Duration
-	doneCh          chan struct{}
-
-	// manualResolver is used in the remote LB ClientConn inside grpclb. When
-	// resolved address updates are received by grpclb, filtered updates will be
-	// send to remote LB ClientConn through this resolver.
-	manualResolver *lbManualResolver
-	// The ClientConn to talk to the remote balancer.
-	ccRemoteLB *ClientConn
-
-	// Support client side load reporting. Each picker gets a reference to this,
-	// and will update its content.
-	clientStats *rpcStats
-
-	mu sync.Mutex // guards everything following.
-	// The full server list including drops, used to check if the newly received
-	// serverList contains anything new. Each generate picker will also have
-	// reference to this list to do the first layer pick.
-	fullServerList []*lbpb.Server
-	// All backends addresses, with metadata set to nil. This list contains all
-	// backend addresses in the same order and with the same duplicates as in
-	// serverlist. When generating picker, a SubConn slice with the same order
-	// but with only READY SCs will be gerenated.
-	backendAddrs []resolver.Address
-	// Roundrobin functionalities.
-	csEvltr  *connectivityStateEvaluator
-	state    connectivity.State
-	subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
-	scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
-	picker   balancer.Picker
-	// Support fallback to resolved backend addresses if there's no response
-	// from remote balancer within fallbackTimeout.
-	fallbackTimerExpired bool
-	serverListReceived   bool
-	// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
-	// when resolved address updates are received, and read in the goroutine
-	// handling fallback.
-	resolvedBackendAddrs []resolver.Address
-}
-
-// regeneratePicker takes a snapshot of the balancer, and generates a picker from
-// it. The picker
-//  - always returns ErrTransientFailure if the balancer is in TransientFailure,
-//  - does two layer roundrobin pick otherwise.
-// Caller must hold lb.mu.
-func (lb *lbBalancer) regeneratePicker() {
-	if lb.state == connectivity.TransientFailure {
-		lb.picker = &errPicker{err: balancer.ErrTransientFailure}
-		return
-	}
-	var readySCs []balancer.SubConn
-	for _, a := range lb.backendAddrs {
-		if sc, ok := lb.subConns[a]; ok {
-			if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
-				readySCs = append(readySCs, sc)
-			}
-		}
-	}
-
-	if len(lb.fullServerList) <= 0 {
-		if len(readySCs) <= 0 {
-			lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
-			return
-		}
-		lb.picker = &rrPicker{subConns: readySCs}
-		return
-	}
-	lb.picker = &lbPicker{
-		serverList: lb.fullServerList,
-		subConns:   readySCs,
-		stats:      lb.clientStats,
-	}
-}
-
-func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
-	lb.mu.Lock()
-	defer lb.mu.Unlock()
-
-	oldS, ok := lb.scStates[sc]
-	if !ok {
-		grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
-		return
-	}
-	lb.scStates[sc] = s
-	switch s {
-	case connectivity.Idle:
-		sc.Connect()
-	case connectivity.Shutdown:
-		// When an address was removed by resolver, b called RemoveSubConn but
-		// kept the sc's state in scStates. Remove state for this sc here.
-		delete(lb.scStates, sc)
-	}
-
-	oldAggrState := lb.state
-	lb.state = lb.csEvltr.recordTransition(oldS, s)
-
-	// Regenerate picker when one of the following happens:
-	//  - this sc became ready from not-ready
-	//  - this sc became not-ready from ready
-	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
-	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
-	if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
-		(lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
-		lb.regeneratePicker()
-	}
-
-	lb.cc.UpdateBalancerState(lb.state, lb.picker)
-}
-
-// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
-// resolved backends (backends received from resolver, not from remote balancer)
-// if no connection to remote balancers was successful.
-func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
-	timer := time.NewTimer(fallbackTimeout)
-	defer timer.Stop()
-	select {
-	case <-timer.C:
-	case <-lb.doneCh:
-		return
-	}
-	lb.mu.Lock()
-	if lb.serverListReceived {
-		lb.mu.Unlock()
-		return
-	}
-	lb.fallbackTimerExpired = true
-	lb.refreshSubConns(lb.resolvedBackendAddrs)
-	lb.mu.Unlock()
-}
-
-// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
-// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
-// connections.
-func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
-	if len(addrs) <= 0 {
-		return
-	}
-
-	var remoteBalancerAddrs, backendAddrs []resolver.Address
-	for _, a := range addrs {
-		if a.Type == resolver.GRPCLB {
-			remoteBalancerAddrs = append(remoteBalancerAddrs, a)
-		} else {
-			backendAddrs = append(backendAddrs, a)
-		}
-	}
-
-	if lb.ccRemoteLB == nil {
-		if len(remoteBalancerAddrs) <= 0 {
-			grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
-			return
-		}
-		// First time receiving resolved addresses, create a cc to remote
-		// balancers.
-		lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
-		// Start the fallback goroutine.
-		go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
-	}
-
-	// cc to remote balancers uses lb.manualResolver. Send the updated remote
-	// balancer addresses to it through manualResolver.
-	lb.manualResolver.NewAddress(remoteBalancerAddrs)
-
-	lb.mu.Lock()
-	lb.resolvedBackendAddrs = backendAddrs
-	// If serverListReceived is true, connection to remote balancer was
-	// successful and there's no need to do fallback anymore.
-	// If fallbackTimerExpired is false, fallback hasn't happened yet.
-	if !lb.serverListReceived && lb.fallbackTimerExpired {
-		// This means we received a new list of resolved backends, and we are
-		// still in fallback mode. Need to update the list of backends we are
-		// using to the new list of backends.
-		lb.refreshSubConns(lb.resolvedBackendAddrs)
-	}
-	lb.mu.Unlock()
-}
-
-func (lb *lbBalancer) Close() {
-	select {
-	case <-lb.doneCh:
-		return
-	default:
-	}
-	close(lb.doneCh)
-	if lb.ccRemoteLB != nil {
-		lb.ccRemoteLB.Close()
-	}
-	lb.cc.close()
-}
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
deleted file mode 100644
index b3b32b4..0000000
--- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
+++ /dev/null
@@ -1,799 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc_lb_v1/messages/messages.proto
-
-package messages // import "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Duration struct {
-	// Signed seconds of the span of time. Must be from -315,576,000,000
-	// to +315,576,000,000 inclusive.
-	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
-	// Signed fractions of a second at nanosecond resolution of the span
-	// of time. Durations less than one second are represented with a 0
-	// `seconds` field and a positive or negative `nanos` field. For durations
-	// of one second or more, a non-zero value for the `nanos` field must be
-	// of the same sign as the `seconds` field. Must be from -999,999,999
-	// to +999,999,999 inclusive.
-	Nanos                int32    `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Duration) Reset()         { *m = Duration{} }
-func (m *Duration) String() string { return proto.CompactTextString(m) }
-func (*Duration) ProtoMessage()    {}
-func (*Duration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{0}
-}
-func (m *Duration) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Duration.Unmarshal(m, b)
-}
-func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
-}
-func (dst *Duration) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Duration.Merge(dst, src)
-}
-func (m *Duration) XXX_Size() int {
-	return xxx_messageInfo_Duration.Size(m)
-}
-func (m *Duration) XXX_DiscardUnknown() {
-	xxx_messageInfo_Duration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Duration proto.InternalMessageInfo
-
-func (m *Duration) GetSeconds() int64 {
-	if m != nil {
-		return m.Seconds
-	}
-	return 0
-}
-
-func (m *Duration) GetNanos() int32 {
-	if m != nil {
-		return m.Nanos
-	}
-	return 0
-}
-
-type Timestamp struct {
-	// Represents seconds of UTC time since Unix epoch
-	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
-	// 9999-12-31T23:59:59Z inclusive.
-	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
-	// Non-negative fractions of a second at nanosecond resolution. Negative
-	// second values with fractions must still have non-negative nanos values
-	// that count forward in time. Must be from 0 to 999,999,999
-	// inclusive.
-	Nanos                int32    `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Timestamp) Reset()         { *m = Timestamp{} }
-func (m *Timestamp) String() string { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage()    {}
-func (*Timestamp) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{1}
-}
-func (m *Timestamp) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Timestamp.Unmarshal(m, b)
-}
-func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
-}
-func (dst *Timestamp) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Timestamp.Merge(dst, src)
-}
-func (m *Timestamp) XXX_Size() int {
-	return xxx_messageInfo_Timestamp.Size(m)
-}
-func (m *Timestamp) XXX_DiscardUnknown() {
-	xxx_messageInfo_Timestamp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Timestamp proto.InternalMessageInfo
-
-func (m *Timestamp) GetSeconds() int64 {
-	if m != nil {
-		return m.Seconds
-	}
-	return 0
-}
-
-func (m *Timestamp) GetNanos() int32 {
-	if m != nil {
-		return m.Nanos
-	}
-	return 0
-}
-
-type LoadBalanceRequest struct {
-	// Types that are valid to be assigned to LoadBalanceRequestType:
-	//	*LoadBalanceRequest_InitialRequest
-	//	*LoadBalanceRequest_ClientStats
-	LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
-	XXX_NoUnkeyedLiteral   struct{}                                    `json:"-"`
-	XXX_unrecognized       []byte                                      `json:"-"`
-	XXX_sizecache          int32                                       `json:"-"`
-}
-
-func (m *LoadBalanceRequest) Reset()         { *m = LoadBalanceRequest{} }
-func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
-func (*LoadBalanceRequest) ProtoMessage()    {}
-func (*LoadBalanceRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{2}
-}
-func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b)
-}
-func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic)
-}
-func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LoadBalanceRequest.Merge(dst, src)
-}
-func (m *LoadBalanceRequest) XXX_Size() int {
-	return xxx_messageInfo_LoadBalanceRequest.Size(m)
-}
-func (m *LoadBalanceRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo
-
-type isLoadBalanceRequest_LoadBalanceRequestType interface {
-	isLoadBalanceRequest_LoadBalanceRequestType()
-}
-
-type LoadBalanceRequest_InitialRequest struct {
-	InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
-}
-type LoadBalanceRequest_ClientStats struct {
-	ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
-}
-
-func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
-func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType()    {}
-
-func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
-	if m != nil {
-		return m.LoadBalanceRequestType
-	}
-	return nil
-}
-
-func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
-	if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
-		return x.InitialRequest
-	}
-	return nil
-}
-
-func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
-	if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
-		return x.ClientStats
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
-		(*LoadBalanceRequest_InitialRequest)(nil),
-		(*LoadBalanceRequest_ClientStats)(nil),
-	}
-}
-
-func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*LoadBalanceRequest)
-	// load_balance_request_type
-	switch x := m.LoadBalanceRequestType.(type) {
-	case *LoadBalanceRequest_InitialRequest:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.InitialRequest); err != nil {
-			return err
-		}
-	case *LoadBalanceRequest_ClientStats:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ClientStats); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*LoadBalanceRequest)
-	switch tag {
-	case 1: // load_balance_request_type.initial_request
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(InitialLoadBalanceRequest)
-		err := b.DecodeMessage(msg)
-		m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
-		return true, err
-	case 2: // load_balance_request_type.client_stats
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ClientStats)
-		err := b.DecodeMessage(msg)
-		m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*LoadBalanceRequest)
-	// load_balance_request_type
-	switch x := m.LoadBalanceRequestType.(type) {
-	case *LoadBalanceRequest_InitialRequest:
-		s := proto.Size(x.InitialRequest)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *LoadBalanceRequest_ClientStats:
-		s := proto.Size(x.ClientStats)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-type InitialLoadBalanceRequest struct {
-	// Name of load balanced service (IE, balancer.service.com)
-	// length should be less than 256 bytes.
-	Name                 string   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InitialLoadBalanceRequest) Reset()         { *m = InitialLoadBalanceRequest{} }
-func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
-func (*InitialLoadBalanceRequest) ProtoMessage()    {}
-func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{3}
-}
-func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b)
-}
-func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic)
-}
-func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src)
-}
-func (m *InitialLoadBalanceRequest) XXX_Size() int {
-	return xxx_messageInfo_InitialLoadBalanceRequest.Size(m)
-}
-func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo
-
-func (m *InitialLoadBalanceRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-// Contains client level statistics that are useful to load balancing. Each
-// count except the timestamp should be reset to zero after reporting the stats.
-type ClientStats struct {
-	// The timestamp of generating the report.
-	Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
-	// The total number of RPCs that started.
-	NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
-	// The total number of RPCs that finished.
-	NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
-	// The total number of RPCs that were dropped by the client because of rate
-	// limiting.
-	NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
-	// The total number of RPCs that were dropped by the client because of load
-	// balancing.
-	NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
-	// The total number of RPCs that failed to reach a server except dropped RPCs.
-	NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
-	// The total number of RPCs that finished and are known to have been received
-	// by a server.
-	NumCallsFinishedKnownReceived int64    `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
-	XXX_NoUnkeyedLiteral          struct{} `json:"-"`
-	XXX_unrecognized              []byte   `json:"-"`
-	XXX_sizecache                 int32    `json:"-"`
-}
-
-func (m *ClientStats) Reset()         { *m = ClientStats{} }
-func (m *ClientStats) String() string { return proto.CompactTextString(m) }
-func (*ClientStats) ProtoMessage()    {}
-func (*ClientStats) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{4}
-}
-func (m *ClientStats) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ClientStats.Unmarshal(m, b)
-}
-func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic)
-}
-func (dst *ClientStats) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ClientStats.Merge(dst, src)
-}
-func (m *ClientStats) XXX_Size() int {
-	return xxx_messageInfo_ClientStats.Size(m)
-}
-func (m *ClientStats) XXX_DiscardUnknown() {
-	xxx_messageInfo_ClientStats.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClientStats proto.InternalMessageInfo
-
-func (m *ClientStats) GetTimestamp() *Timestamp {
-	if m != nil {
-		return m.Timestamp
-	}
-	return nil
-}
-
-func (m *ClientStats) GetNumCallsStarted() int64 {
-	if m != nil {
-		return m.NumCallsStarted
-	}
-	return 0
-}
-
-func (m *ClientStats) GetNumCallsFinished() int64 {
-	if m != nil {
-		return m.NumCallsFinished
-	}
-	return 0
-}
-
-func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
-	if m != nil {
-		return m.NumCallsFinishedWithDropForRateLimiting
-	}
-	return 0
-}
-
-func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
-	if m != nil {
-		return m.NumCallsFinishedWithDropForLoadBalancing
-	}
-	return 0
-}
-
-func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
-	if m != nil {
-		return m.NumCallsFinishedWithClientFailedToSend
-	}
-	return 0
-}
-
-func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
-	if m != nil {
-		return m.NumCallsFinishedKnownReceived
-	}
-	return 0
-}
-
-type LoadBalanceResponse struct {
-	// Types that are valid to be assigned to LoadBalanceResponseType:
-	//	*LoadBalanceResponse_InitialResponse
-	//	*LoadBalanceResponse_ServerList
-	LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
-	XXX_NoUnkeyedLiteral    struct{}                                      `json:"-"`
-	XXX_unrecognized        []byte                                        `json:"-"`
-	XXX_sizecache           int32                                         `json:"-"`
-}
-
-func (m *LoadBalanceResponse) Reset()         { *m = LoadBalanceResponse{} }
-func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
-func (*LoadBalanceResponse) ProtoMessage()    {}
-func (*LoadBalanceResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{5}
-}
-func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b)
-}
-func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic)
-}
-func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LoadBalanceResponse.Merge(dst, src)
-}
-func (m *LoadBalanceResponse) XXX_Size() int {
-	return xxx_messageInfo_LoadBalanceResponse.Size(m)
-}
-func (m *LoadBalanceResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo
-
-type isLoadBalanceResponse_LoadBalanceResponseType interface {
-	isLoadBalanceResponse_LoadBalanceResponseType()
-}
-
-type LoadBalanceResponse_InitialResponse struct {
-	InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
-}
-type LoadBalanceResponse_ServerList struct {
-	ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
-}
-
-func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
-func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType()      {}
-
-func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
-	if m != nil {
-		return m.LoadBalanceResponseType
-	}
-	return nil
-}
-
-func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
-	if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
-		return x.InitialResponse
-	}
-	return nil
-}
-
-func (m *LoadBalanceResponse) GetServerList() *ServerList {
-	if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
-		return x.ServerList
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
-		(*LoadBalanceResponse_InitialResponse)(nil),
-		(*LoadBalanceResponse_ServerList)(nil),
-	}
-}
-
-func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*LoadBalanceResponse)
-	// load_balance_response_type
-	switch x := m.LoadBalanceResponseType.(type) {
-	case *LoadBalanceResponse_InitialResponse:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.InitialResponse); err != nil {
-			return err
-		}
-	case *LoadBalanceResponse_ServerList:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ServerList); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*LoadBalanceResponse)
-	switch tag {
-	case 1: // load_balance_response_type.initial_response
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(InitialLoadBalanceResponse)
-		err := b.DecodeMessage(msg)
-		m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
-		return true, err
-	case 2: // load_balance_response_type.server_list
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ServerList)
-		err := b.DecodeMessage(msg)
-		m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*LoadBalanceResponse)
-	// load_balance_response_type
-	switch x := m.LoadBalanceResponseType.(type) {
-	case *LoadBalanceResponse_InitialResponse:
-		s := proto.Size(x.InitialResponse)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *LoadBalanceResponse_ServerList:
-		s := proto.Size(x.ServerList)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-type InitialLoadBalanceResponse struct {
-	// This is an application layer redirect that indicates the client should use
-	// the specified server for load balancing. When this field is non-empty in
-	// the response, the client should open a separate connection to the
-	// load_balancer_delegate and call the BalanceLoad method. Its length should
-	// be less than 64 bytes.
-	LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
-	// This interval defines how often the client should send the client stats
-	// to the load balancer. Stats should only be reported when the duration is
-	// positive.
-	ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
-	XXX_NoUnkeyedLiteral      struct{}  `json:"-"`
-	XXX_unrecognized          []byte    `json:"-"`
-	XXX_sizecache             int32     `json:"-"`
-}
-
-func (m *InitialLoadBalanceResponse) Reset()         { *m = InitialLoadBalanceResponse{} }
-func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
-func (*InitialLoadBalanceResponse) ProtoMessage()    {}
-func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{6}
-}
-func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b)
-}
-func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic)
-}
-func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src)
-}
-func (m *InitialLoadBalanceResponse) XXX_Size() int {
-	return xxx_messageInfo_InitialLoadBalanceResponse.Size(m)
-}
-func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo
-
-func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
-	if m != nil {
-		return m.LoadBalancerDelegate
-	}
-	return ""
-}
-
-func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
-	if m != nil {
-		return m.ClientStatsReportInterval
-	}
-	return nil
-}
-
-type ServerList struct {
-	// Contains a list of servers selected by the load balancer. The list will
-	// be updated when server resolutions change or as needed to balance load
-	// across more servers. The client should consume the server list in order
-	// unless instructed otherwise via the client_config.
-	Servers              []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *ServerList) Reset()         { *m = ServerList{} }
-func (m *ServerList) String() string { return proto.CompactTextString(m) }
-func (*ServerList) ProtoMessage()    {}
-func (*ServerList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{7}
-}
-func (m *ServerList) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServerList.Unmarshal(m, b)
-}
-func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServerList.Marshal(b, m, deterministic)
-}
-func (dst *ServerList) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServerList.Merge(dst, src)
-}
-func (m *ServerList) XXX_Size() int {
-	return xxx_messageInfo_ServerList.Size(m)
-}
-func (m *ServerList) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServerList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerList proto.InternalMessageInfo
-
-func (m *ServerList) GetServers() []*Server {
-	if m != nil {
-		return m.Servers
-	}
-	return nil
-}
-
-// Contains server information. When none of the [drop_for_*] fields are true,
-// use the other fields. When drop_for_rate_limiting is true, ignore all other
-// fields. Use drop_for_load_balancing only when it is true and
-// drop_for_rate_limiting is false.
-type Server struct {
-	// A resolved address for the server, serialized in network-byte-order. It may
-	// either be an IPv4 or IPv6 address.
-	IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
-	// A resolved port number for the server.
-	Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
-	// An opaque but printable token given to the frontend for each pick. All
-	// frontend requests for that pick must include the token in its initial
-	// metadata. The token is used by the backend to verify the request and to
-	// allow the backend to report load to the gRPC LB system.
-	//
-	// Its length is variable but less than 50 bytes.
-	LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
-	// Indicates whether this particular request should be dropped by the client
-	// for rate limiting.
-	DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
-	// Indicates whether this particular request should be dropped by the client
-	// for load balancing.
-	DropForLoadBalancing bool     `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Server) Reset()         { *m = Server{} }
-func (m *Server) String() string { return proto.CompactTextString(m) }
-func (*Server) ProtoMessage()    {}
-func (*Server) Descriptor() ([]byte, []int) {
-	return fileDescriptor_messages_b81c731f0e83edbd, []int{8}
-}
-func (m *Server) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Server.Unmarshal(m, b)
-}
-func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Server.Marshal(b, m, deterministic)
-}
-func (dst *Server) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Server.Merge(dst, src)
-}
-func (m *Server) XXX_Size() int {
-	return xxx_messageInfo_Server.Size(m)
-}
-func (m *Server) XXX_DiscardUnknown() {
-	xxx_messageInfo_Server.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Server proto.InternalMessageInfo
-
-func (m *Server) GetIpAddress() []byte {
-	if m != nil {
-		return m.IpAddress
-	}
-	return nil
-}
-
-func (m *Server) GetPort() int32 {
-	if m != nil {
-		return m.Port
-	}
-	return 0
-}
-
-func (m *Server) GetLoadBalanceToken() string {
-	if m != nil {
-		return m.LoadBalanceToken
-	}
-	return ""
-}
-
-func (m *Server) GetDropForRateLimiting() bool {
-	if m != nil {
-		return m.DropForRateLimiting
-	}
-	return false
-}
-
-func (m *Server) GetDropForLoadBalancing() bool {
-	if m != nil {
-		return m.DropForLoadBalancing
-	}
-	return false
-}
-
-func init() {
-	proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
-	proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
-	proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
-	proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
-	proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
-	proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
-	proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
-	proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
-	proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
-}
-
-func init() {
-	proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor_messages_b81c731f0e83edbd)
-}
-
-var fileDescriptor_messages_b81c731f0e83edbd = []byte{
-	// 731 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
-	0x14, 0x26, 0x9b, 0x00, 0xc9, 0x09, 0x5a, 0xb2, 0x26, 0x0b, 0x81, 0x05, 0x89, 0x1d, 0x69, 0xd9,
-	0x68, 0xc5, 0x4e, 0x04, 0xd9, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43,
-	0x55, 0xa9, 0x52, 0x65, 0x39, 0x19, 0x33, 0x58, 0x38, 0xf6, 0xd4, 0x76, 0x82, 0xfa, 0x08, 0x7d,
-	0x94, 0x3e, 0x46, 0xd5, 0x67, 0xe8, 0xfb, 0x54, 0xe3, 0x99, 0xc9, 0x0c, 0x10, 0x40, 0xbd, 0x89,
-	0xec, 0xe3, 0xef, 0x7c, 0xdf, 0xf1, 0x89, 0xbf, 0x33, 0xe0, 0x85, 0x3a, 0x1a, 0x11, 0x31, 0x24,
-	0xd3, 0x83, 0xce, 0x98, 0x19, 0x43, 0x43, 0x66, 0x66, 0x0b, 0x3f, 0xd2, 0xca, 0x2a, 0x04, 0x31,
-	0xc6, 0x17, 0x43, 0x7f, 0x7a, 0xe0, 0x3d, 0x85, 0xea, 0xf1, 0x44, 0x53, 0xcb, 0x95, 0x44, 0x2d,
-	0x58, 0x36, 0x6c, 0xa4, 0x64, 0x60, 0x5a, 0xa5, 0xdd, 0x52, 0xbb, 0x8c, 0xb3, 0x2d, 0x6a, 0xc2,
-	0xa2, 0xa4, 0x52, 0x99, 0xd6, 0x2f, 0xbb, 0xa5, 0xf6, 0x22, 0x4e, 0x36, 0xde, 0x33, 0xa8, 0x9d,
-	0xf3, 0x31, 0x33, 0x96, 0x8e, 0xa3, 0x9f, 0x4e, 0xfe, 0x5a, 0x02, 0x74, 0xa6, 0x68, 0xd0, 0xa3,
-	0x82, 0xca, 0x11, 0xc3, 0xec, 0xe3, 0x84, 0x19, 0x8b, 0xde, 0xc0, 0x2a, 0x97, 0xdc, 0x72, 0x2a,
-	0x88, 0x4e, 0x42, 0x8e, 0xae, 0x7e, 0xf8, 0x97, 0x9f, 0x57, 0xed, 0x9f, 0x26, 0x90, 0xbb, 0xf9,
-	0xfd, 0x05, 0xfc, 0x6b, 0x9a, 0x9f, 0x31, 0x3e, 0x87, 0x95, 0x91, 0xe0, 0x4c, 0x5a, 0x62, 0x2c,
-	0xb5, 0x49, 0x15, 0xf5, 0xc3, 0x8d, 0x22, 0xdd, 0x91, 0x3b, 0x1f, 0xc4, 0xc7, 0xfd, 0x05, 0x5c,
-	0x1f, 0xe5, 0xdb, 0xde, 0x1f, 0xb0, 0x29, 0x14, 0x0d, 0xc8, 0x30, 0x91, 0xc9, 0x8a, 0x22, 0xf6,
-	0x53, 0xc4, 0xbc, 0x0e, 0x6c, 0xde, 0x5b, 0x09, 0x42, 0x50, 0x91, 0x74, 0xcc, 0x5c, 0xf9, 0x35,
-	0xec, 0xd6, 0xde, 0xe7, 0x0a, 0xd4, 0x0b, 0x62, 0xa8, 0x0b, 0x35, 0x9b, 0x75, 0x30, 0xbd, 0xe7,
-	0xef, 0xc5, 0xc2, 0x66, 0xed, 0xc5, 0x39, 0x0e, 0xfd, 0x03, 0xbf, 0xc9, 0xc9, 0x98, 0x8c, 0xa8,
-	0x10, 0x26, 0xbe, 0x93, 0xb6, 0x2c, 0x70, 0xb7, 0x2a, 0xe3, 0x55, 0x39, 0x19, 0x1f, 0xc5, 0xf1,
-	0x41, 0x12, 0x46, 0xfb, 0x80, 0x72, 0xec, 0x05, 0x97, 0xdc, 0x5c, 0xb2, 0xa0, 0x55, 0x76, 0xe0,
-	0x46, 0x06, 0x3e, 0x49, 0xe3, 0x88, 0x80, 0x7f, 0x17, 0x4d, 0xae, 0xb9, 0xbd, 0x24, 0x81, 0x56,
-	0x11, 0xb9, 0x50, 0x9a, 0x68, 0x6a, 0x19, 0x11, 0x7c, 0xcc, 0x2d, 0x97, 0x61, 0xab, 0xe2, 0x98,
-	0xfe, 0xbe, 0xcd, 0xf4, 0x8e, 0xdb, 0xcb, 0x63, 0xad, 0xa2, 0x13, 0xa5, 0x31, 0xb5, 0xec, 0x2c,
-	0x85, 0x23, 0x0a, 0x9d, 0x47, 0x05, 0x0a, 0xed, 0x8e, 0x15, 0x16, 0x9d, 0x42, 0xfb, 0x01, 0x85,
-	0xbc, 0xf7, 0xb1, 0xc4, 0x07, 0xf8, 0xf7, 0x3e, 0x89, 0xf4, 0x19, 0x5c, 0x50, 0x2e, 0x58, 0x40,
-	0xac, 0x22, 0x86, 0xc9, 0xa0, 0xb5, 0xe4, 0x04, 0xf6, 0xe6, 0x09, 0x24, 0x7f, 0xd5, 0x89, 0xc3,
-	0x9f, 0xab, 0x01, 0x93, 0x01, 0xea, 0xc3, 0x9f, 0x73, 0xe8, 0xaf, 0xa4, 0xba, 0x96, 0x44, 0xb3,
-	0x11, 0xe3, 0x53, 0x16, 0xb4, 0x96, 0x1d, 0xe5, 0xce, 0x6d, 0xca, 0xd7, 0x31, 0x0a, 0xa7, 0x20,
-	0xef, 0x5b, 0x09, 0xd6, 0x6e, 0x3c, 0x1b, 0x13, 0x29, 0x69, 0x18, 0x1a, 0x40, 0x23, 0x77, 0x40,
-	0x12, 0x4b, 0x9f, 0xc6, 0xde, 0x63, 0x16, 0x48, 0xd0, 0xfd, 0x05, 0xbc, 0x3a, 0xf3, 0x40, 0x4a,
-	0xfa, 0x04, 0xea, 0x86, 0xe9, 0x29, 0xd3, 0x44, 0x70, 0x63, 0x53, 0x0f, 0xac, 0x17, 0xf9, 0x06,
-	0xee, 0xf8, 0x8c, 0x3b, 0x0f, 0x81, 0x99, 0xed, 0x7a, 0xdb, 0xb0, 0x75, 0xcb, 0x01, 0x09, 0x67,
-	0x62, 0x81, 0x2f, 0x25, 0xd8, 0xba, 0xbf, 0x14, 0xf4, 0x1f, 0xac, 0x17, 0x93, 0x35, 0x09, 0x98,
-	0x60, 0x21, 0xb5, 0x99, 0x2d, 0x9a, 0x22, 0x4f, 0xd2, 0xc7, 0xe9, 0x19, 0x7a, 0x0b, 0xdb, 0x45,
-	0xcb, 0x12, 0xcd, 0x22, 0xa5, 0x2d, 0xe1, 0xd2, 0x32, 0x3d, 0xa5, 0x22, 0x2d, 0xbf, 0x59, 0x2c,
-	0x3f, 0x1b, 0x62, 0x78, 0xb3, 0xe0, 0x5e, 0xec, 0xf2, 0x4e, 0xd3, 0x34, 0xef, 0x05, 0x40, 0x7e,
-	0x4b, 0xb4, 0x1f, 0x0f, 0xac, 0x78, 0x17, 0x0f, 0xac, 0x72, 0xbb, 0x7e, 0x88, 0xee, 0xb6, 0x03,
-	0x67, 0x90, 0x57, 0x95, 0x6a, 0xb9, 0x51, 0xf1, 0xbe, 0x97, 0x60, 0x29, 0x39, 0x41, 0x3b, 0x00,
-	0x3c, 0x22, 0x34, 0x08, 0x34, 0x33, 0xc9, 0xc8, 0x5b, 0xc1, 0x35, 0x1e, 0xbd, 0x4c, 0x02, 0xb1,
-	0xfb, 0x63, 0xed, 0x74, 0xe6, 0xb9, 0x75, 0x6c, 0xc6, 0x1b, 0x9d, 0xb4, 0xea, 0x8a, 0x49, 0x67,
-	0xc6, 0x1a, 0x6e, 0x14, 0x1a, 0x71, 0x1e, 0xc7, 0x51, 0x17, 0xd6, 0x1f, 0x30, 0x5d, 0x15, 0xaf,
-	0x05, 0x73, 0x0c, 0xf6, 0x3f, 0x6c, 0x3c, 0x64, 0xa4, 0x2a, 0x6e, 0x06, 0x73, 0x4c, 0xd3, 0xeb,
-	0xbe, 0x3f, 0x08, 0x95, 0x0a, 0x05, 0xf3, 0x43, 0x25, 0xa8, 0x0c, 0x7d, 0xa5, 0xc3, 0x4e, 0xdc,
-	0x0d, 0xf7, 0x23, 0x86, 0x9d, 0x39, 0x5f, 0x95, 0xe1, 0x92, 0xfb, 0x9a, 0x74, 0x7f, 0x04, 0x00,
-	0x00, 0xff, 0xff, 0x8e, 0xd0, 0x70, 0xb7, 0x73, 0x06, 0x00, 0x00,
-}
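A note on the generated code being dropped above: every Get* accessor guards against a nil receiver, so callers can read fields without nil checks. A minimal sketch of that pattern, with a plain hypothetical struct standing in for the generated protobuf type:

package main

import "fmt"

// server mirrors the shape of the generated Server message above
// (a plain struct stands in for the protobuf type).
type server struct {
	Port             int32
	LoadBalanceToken string
}

// GetPort is nil-safe, like the generated getters: calling it on a
// nil *server returns the zero value instead of panicking.
func (s *server) GetPort() int32 {
	if s != nil {
		return s.Port
	}
	return 0
}

func main() {
	var s *server            // nil on purpose
	fmt.Println(s.GetPort()) // prints 0, no panic
}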
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto
deleted file mode 100644
index 42d99c1..0000000
--- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2016 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package grpc.lb.v1;
-option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages";
-
-message Duration {
-  // Signed seconds of the span of time. Must be from -315,576,000,000
-  // to +315,576,000,000 inclusive.
-  int64 seconds = 1;
-
-  // Signed fractions of a second at nanosecond resolution of the span
-  // of time. Durations less than one second are represented with a 0
-  // `seconds` field and a positive or negative `nanos` field. For durations
-  // of one second or more, a non-zero value for the `nanos` field must be
-  // of the same sign as the `seconds` field. Must be from -999,999,999
-  // to +999,999,999 inclusive.
-  int32 nanos = 2;
-}
-
-message Timestamp {
-  // Represents seconds of UTC time since Unix epoch
-  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
-  // 9999-12-31T23:59:59Z inclusive.
-  int64 seconds = 1;
-
-  // Non-negative fractions of a second at nanosecond resolution. Negative
-  // second values with fractions must still have non-negative nanos values
-  // that count forward in time. Must be from 0 to 999,999,999
-  // inclusive.
-  int32 nanos = 2;
-}
-
-message LoadBalanceRequest {
-  oneof load_balance_request_type {
-    // This message should be sent on the first request to the load balancer.
-    InitialLoadBalanceRequest initial_request = 1;
-
-    // The client stats should be periodically reported to the load balancer
-    // based on the duration defined in the InitialLoadBalanceResponse.
-    ClientStats client_stats = 2;
-  }
-}
-
-message InitialLoadBalanceRequest {
-  // Name of load balanced service (IE, balancer.service.com)
-  // length should be less than 256 bytes.
-  string name = 1;
-}
-
-// Contains client level statistics that are useful to load balancing. Each
-// count except the timestamp should be reset to zero after reporting the stats.
-message ClientStats {
-  // The timestamp of generating the report.
-  Timestamp timestamp = 1;
-
-  // The total number of RPCs that started.
-  int64 num_calls_started = 2;
-
-  // The total number of RPCs that finished.
-  int64 num_calls_finished = 3;
-
-  // The total number of RPCs that were dropped by the client because of rate
-  // limiting.
-  int64 num_calls_finished_with_drop_for_rate_limiting = 4;
-
-  // The total number of RPCs that were dropped by the client because of load
-  // balancing.
-  int64 num_calls_finished_with_drop_for_load_balancing = 5;
-
-  // The total number of RPCs that failed to reach a server except dropped RPCs.
-  int64 num_calls_finished_with_client_failed_to_send = 6;
-
-  // The total number of RPCs that finished and are known to have been received
-  // by a server.
-  int64 num_calls_finished_known_received = 7;
-}
-
-message LoadBalanceResponse {
-  oneof load_balance_response_type {
-    // This message should be sent on the first response to the client.
-    InitialLoadBalanceResponse initial_response = 1;
-
-    // Contains the list of servers selected by the load balancer. The client
-    // should send requests to these servers in the specified order.
-    ServerList server_list = 2;
-  }
-}
-
-message InitialLoadBalanceResponse {
-  // This is an application layer redirect that indicates the client should use
-  // the specified server for load balancing. When this field is non-empty in
-  // the response, the client should open a separate connection to the
-  // load_balancer_delegate and call the BalanceLoad method. Its length should
-  // be less than 64 bytes.
-  string load_balancer_delegate = 1;
-
-  // This interval defines how often the client should send the client stats
-  // to the load balancer. Stats should only be reported when the duration is
-  // positive.
-  Duration client_stats_report_interval = 2;
-}
-
-message ServerList {
-  // Contains a list of servers selected by the load balancer. The list will
-  // be updated when server resolutions change or as needed to balance load
-  // across more servers. The client should consume the server list in order
-  // unless instructed otherwise via the client_config.
-  repeated Server servers = 1;
-
-  // Was google.protobuf.Duration expiration_interval.
-  reserved 3;
-}
-
-// Contains server information. When none of the [drop_for_*] fields are true,
-// use the other fields. When drop_for_rate_limiting is true, ignore all other
-// fields. Use drop_for_load_balancing only when it is true and
-// drop_for_rate_limiting is false.
-message Server {
-  // A resolved address for the server, serialized in network-byte-order. It may
-  // either be an IPv4 or IPv6 address.
-  bytes ip_address = 1;
-
-  // A resolved port number for the server.
-  int32 port = 2;
-
-  // An opaque but printable token given to the frontend for each pick. All
-  // frontend requests for that pick must include the token in its initial
-  // metadata. The token is used by the backend to verify the request and to
-  // allow the backend to report load to the gRPC LB system.
-  //
-  // Its length is variable but less than 50 bytes.
-  string load_balance_token = 3;
-
-  // Indicates whether this particular request should be dropped by the client
-  // for rate limiting.
-  bool drop_for_rate_limiting = 4;
-
-  // Indicates whether this particular request should be dropped by the client
-  // for load balancing.
-  bool drop_for_load_balancing = 5;
-}
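The comment on the Server message above spells out how a client is meant to interpret the drop_for_* flags: rate-limiting drops take precedence, load-balancing drops apply only when rate limiting is false, and otherwise the address fields are used. A small illustrative sketch of that decision logic, using a hypothetical plain struct rather than the generated type:

package main

import "fmt"

// pickDecision mirrors only the Server fields that matter for drops.
type pickDecision struct {
	DropForRateLimiting  bool
	DropForLoadBalancing bool
}

// shouldDrop applies the rules from the message comment above.
func shouldDrop(d pickDecision) (drop bool, reason string) {
	switch {
	case d.DropForRateLimiting:
		return true, "rate limiting"
	case d.DropForLoadBalancing:
		return true, "load balancing"
	default:
		return false, "" // use the server's address fields
	}
}

func main() {
	fmt.Println(shouldDrop(pickDecision{DropForRateLimiting: true}))
	fmt.Println(shouldDrop(pickDecision{DropForLoadBalancing: true}))
	fmt.Println(shouldDrop(pickDecision{}))
}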
diff --git a/vendor/google.golang.org/grpc/grpclb_picker.go b/vendor/google.golang.org/grpc/grpclb_picker.go
deleted file mode 100644
index 872c7cc..0000000
--- a/vendor/google.golang.org/grpc/grpclb_picker.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"sync"
-	"sync/atomic"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
-	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
-	"google.golang.org/grpc/status"
-)
-
-type rpcStats struct {
-	NumCallsStarted                          int64
-	NumCallsFinished                         int64
-	NumCallsFinishedWithDropForRateLimiting  int64
-	NumCallsFinishedWithDropForLoadBalancing int64
-	NumCallsFinishedWithClientFailedToSend   int64
-	NumCallsFinishedKnownReceived            int64
-}
-
-// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
-func (s *rpcStats) toClientStats() *lbpb.ClientStats {
-	stats := &lbpb.ClientStats{
-		NumCallsStarted:                          atomic.SwapInt64(&s.NumCallsStarted, 0),
-		NumCallsFinished:                         atomic.SwapInt64(&s.NumCallsFinished, 0),
-		NumCallsFinishedWithDropForRateLimiting:  atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0),
-		NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0),
-		NumCallsFinishedWithClientFailedToSend:   atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0),
-		NumCallsFinishedKnownReceived:            atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0),
-	}
-	return stats
-}
-
-func (s *rpcStats) dropForRateLimiting() {
-	atomic.AddInt64(&s.NumCallsStarted, 1)
-	atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1)
-	atomic.AddInt64(&s.NumCallsFinished, 1)
-}
-
-func (s *rpcStats) dropForLoadBalancing() {
-	atomic.AddInt64(&s.NumCallsStarted, 1)
-	atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1)
-	atomic.AddInt64(&s.NumCallsFinished, 1)
-}
-
-func (s *rpcStats) failedToSend() {
-	atomic.AddInt64(&s.NumCallsStarted, 1)
-	atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1)
-	atomic.AddInt64(&s.NumCallsFinished, 1)
-}
-
-func (s *rpcStats) knownReceived() {
-	atomic.AddInt64(&s.NumCallsStarted, 1)
-	atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1)
-	atomic.AddInt64(&s.NumCallsFinished, 1)
-}
-
-type errPicker struct {
-	// Pick always returns this err.
-	err error
-}
-
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	return nil, nil, p.err
-}
-
-// rrPicker does roundrobin on subConns. It's typically used when there's no
-// response from remote balancer, and grpclb falls back to the resolved
-// backends.
-//
-// It guaranteed that len(subConns) > 0.
-type rrPicker struct {
-	mu           sync.Mutex
-	subConns     []balancer.SubConn // The subConns that were READY when taking the snapshot.
-	subConnsNext int
-}
-
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	sc := p.subConns[p.subConnsNext]
-	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
-	return sc, nil, nil
-}
-
-// lbPicker does two layers of picks:
-//
-// First layer: roundrobin on all servers in serverList, including drops and backends.
-// - If it picks a drop, the RPC will fail as being dropped.
-// - If it picks a backend, do a second layer pick to pick the real backend.
-//
-// Second layer: roundrobin on all READY backends.
-//
-// It's guaranteed that len(serverList) > 0.
-type lbPicker struct {
-	mu             sync.Mutex
-	serverList     []*lbpb.Server
-	serverListNext int
-	subConns       []balancer.SubConn // The subConns that were READY when taking the snapshot.
-	subConnsNext   int
-
-	stats *rpcStats
-}
-
-func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-
-	// Layer one roundrobin on serverList.
-	s := p.serverList[p.serverListNext]
-	p.serverListNext = (p.serverListNext + 1) % len(p.serverList)
-
-	// If it's a drop, return an error and fail the RPC.
-	if s.DropForRateLimiting {
-		p.stats.dropForRateLimiting()
-		return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
-	}
-	if s.DropForLoadBalancing {
-		p.stats.dropForLoadBalancing()
-		return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
-	}
-
-	// If not a drop but there's no ready subConns.
-	if len(p.subConns) <= 0 {
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-
-	// Return the next ready subConn in the list, also collect rpc stats.
-	sc := p.subConns[p.subConnsNext]
-	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
-	done := func(info balancer.DoneInfo) {
-		if !info.BytesSent {
-			p.stats.failedToSend()
-		} else if info.BytesReceived {
-			p.stats.knownReceived()
-		}
-	}
-	return sc, done, nil
-}
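The rpcStats counters above are drained with atomic.SwapInt64, so each report reads a value and resets it to zero in one atomic step and concurrent increments are never double-counted. A standalone sketch of that read-and-reset pattern (the counter names here are illustrative, not the grpclb fields):

package main

import (
	"fmt"
	"sync/atomic"
)

// counters accumulates events; snapshot drains them atomically.
type counters struct {
	started  int64
	finished int64
}

func (c *counters) record() {
	atomic.AddInt64(&c.started, 1)
	atomic.AddInt64(&c.finished, 1)
}

// snapshot returns the current totals and resets them to zero — the same
// swap-to-zero trick toClientStats uses above.
func (c *counters) snapshot() (started, finished int64) {
	return atomic.SwapInt64(&c.started, 0), atomic.SwapInt64(&c.finished, 0)
}

func main() {
	var c counters
	for i := 0; i < 3; i++ {
		c.record()
	}
	s, f := c.snapshot()
	fmt.Println(s, f) // 3 3
	s, f = c.snapshot()
	fmt.Println(s, f) // 0 0 — already drained
}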
diff --git a/vendor/google.golang.org/grpc/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/grpclb_remote_balancer.go
deleted file mode 100644
index b8dd4f1..0000000
--- a/vendor/google.golang.org/grpc/grpclb_remote_balancer.go
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"fmt"
-	"net"
-	"reflect"
-	"time"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/channelz"
-
-	"google.golang.org/grpc/connectivity"
-	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/resolver"
-)
-
-// processServerList updates balaner's internal state, create/remove SubConns
-// and regenerates picker using the received serverList.
-func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
-	grpclog.Infof("lbBalancer: processing server list: %+v", l)
-	lb.mu.Lock()
-	defer lb.mu.Unlock()
-
-	// Set serverListReceived to true so fallback will not take effect if it has
-	// not hit timeout.
-	lb.serverListReceived = true
-
-	// If the new server list == old server list, do nothing.
-	if reflect.DeepEqual(lb.fullServerList, l.Servers) {
-		grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
-		return
-	}
-	lb.fullServerList = l.Servers
-
-	var backendAddrs []resolver.Address
-	for _, s := range l.Servers {
-		if s.DropForLoadBalancing || s.DropForRateLimiting {
-			continue
-		}
-
-		md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken)
-		ip := net.IP(s.IpAddress)
-		ipStr := ip.String()
-		if ip.To4() == nil {
-			// Add square brackets to ipv6 addresses, otherwise net.Dial() and
-			// net.SplitHostPort() will return too many colons error.
-			ipStr = fmt.Sprintf("[%s]", ipStr)
-		}
-		addr := resolver.Address{
-			Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
-			Metadata: &md,
-		}
-
-		backendAddrs = append(backendAddrs, addr)
-	}
-
-	// Call refreshSubConns to create/remove SubConns.
-	lb.refreshSubConns(backendAddrs)
-	// Regenerate and update picker no matter if there's update on backends (if
-	// any SubConn will be newed/removed). Because since the full serverList was
-	// different, there might be updates in drops or pick weights(different
-	// number of duplicates). We need to update picker with the fulllist.
-	//
-	// Now with cache, even if SubConn was newed/removed, there might be no
-	// state changes.
-	lb.regeneratePicker()
-	lb.cc.UpdateBalancerState(lb.state, lb.picker)
-}
-
-// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
-// indicating whether the backendAddrs are different from the cached
-// backendAddrs (whether any SubConn was newed/removed).
-// Caller must hold lb.mu.
-func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
-	lb.backendAddrs = nil
-	var backendsUpdated bool
-	// addrsSet is the set converted from backendAddrs, it's used to quick
-	// lookup for an address.
-	addrsSet := make(map[resolver.Address]struct{})
-	// Create new SubConns.
-	for _, addr := range backendAddrs {
-		addrWithoutMD := addr
-		addrWithoutMD.Metadata = nil
-		addrsSet[addrWithoutMD] = struct{}{}
-		lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD)
-
-		if _, ok := lb.subConns[addrWithoutMD]; !ok {
-			backendsUpdated = true
-
-			// Use addrWithMD to create the SubConn.
-			sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
-			if err != nil {
-				grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err)
-				continue
-			}
-			lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
-			if _, ok := lb.scStates[sc]; !ok {
-				// Only set state of new sc to IDLE. The state could already be
-				// READY for cached SubConns.
-				lb.scStates[sc] = connectivity.Idle
-			}
-			sc.Connect()
-		}
-	}
-
-	for a, sc := range lb.subConns {
-		// a was removed by resolver.
-		if _, ok := addrsSet[a]; !ok {
-			backendsUpdated = true
-
-			lb.cc.RemoveSubConn(sc)
-			delete(lb.subConns, a)
-			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
-		}
-	}
-
-	return backendsUpdated
-}
-
-func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
-	for {
-		reply, err := s.Recv()
-		if err != nil {
-			return fmt.Errorf("grpclb: failed to recv server list: %v", err)
-		}
-		if serverList := reply.GetServerList(); serverList != nil {
-			lb.processServerList(serverList)
-		}
-	}
-}
-
-func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
-	ticker := time.NewTicker(interval)
-	defer ticker.Stop()
-	for {
-		select {
-		case <-ticker.C:
-		case <-s.Context().Done():
-			return
-		}
-		stats := lb.clientStats.toClientStats()
-		t := time.Now()
-		stats.Timestamp = &lbpb.Timestamp{
-			Seconds: t.Unix(),
-			Nanos:   int32(t.Nanosecond()),
-		}
-		if err := s.Send(&lbpb.LoadBalanceRequest{
-			LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
-				ClientStats: stats,
-			},
-		}); err != nil {
-			return
-		}
-	}
-}
-
-func (lb *lbBalancer) callRemoteBalancer() error {
-	lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	stream, err := lbClient.BalanceLoad(ctx, FailFast(false))
-	if err != nil {
-		return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
-	}
-
-	// grpclb handshake on the stream.
-	initReq := &lbpb.LoadBalanceRequest{
-		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
-			InitialRequest: &lbpb.InitialLoadBalanceRequest{
-				Name: lb.target,
-			},
-		},
-	}
-	if err := stream.Send(initReq); err != nil {
-		return fmt.Errorf("grpclb: failed to send init request: %v", err)
-	}
-	reply, err := stream.Recv()
-	if err != nil {
-		return fmt.Errorf("grpclb: failed to recv init response: %v", err)
-	}
-	initResp := reply.GetInitialResponse()
-	if initResp == nil {
-		return fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
-	}
-	if initResp.LoadBalancerDelegate != "" {
-		return fmt.Errorf("grpclb: Delegation is not supported")
-	}
-
-	go func() {
-		if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
-			lb.sendLoadReport(stream, d)
-		}
-	}()
-	return lb.readServerList(stream)
-}
-
-func (lb *lbBalancer) watchRemoteBalancer() {
-	for {
-		err := lb.callRemoteBalancer()
-		select {
-		case <-lb.doneCh:
-			return
-		default:
-			if err != nil {
-				grpclog.Error(err)
-			}
-		}
-
-	}
-}
-
-func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
-	var dopts []DialOption
-	if creds := lb.opt.DialCreds; creds != nil {
-		if err := creds.OverrideServerName(remoteLBName); err == nil {
-			dopts = append(dopts, WithTransportCredentials(creds))
-		} else {
-			grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err)
-			dopts = append(dopts, WithInsecure())
-		}
-	} else {
-		dopts = append(dopts, WithInsecure())
-	}
-	if lb.opt.Dialer != nil {
-		// WithDialer takes a different type of function, so we instead use a
-		// special DialOption here.
-		dopts = append(dopts, withContextDialer(lb.opt.Dialer))
-	}
-	// Explicitly set pickfirst as the balancer.
-	dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
-	dopts = append(dopts, withResolverBuilder(lb.manualResolver))
-	if channelz.IsOn() {
-		dopts = append(dopts, WithChannelzParentID(lb.opt.ChannelzParentID))
-	}
-
-	// DialContext using manualResolver.Scheme, which is a random scheme generated
-	// when init grpclb. The target name is not important.
-	cc, err := DialContext(context.Background(), "grpclb:///grpclb.server", dopts...)
-	if err != nil {
-		grpclog.Fatalf("failed to dial: %v", err)
-	}
-	lb.ccRemoteLB = cc
-	go lb.watchRemoteBalancer()
-}
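processServerList above turns each balancer-supplied server into a host:port string and brackets IPv6 addresses so net.Dial and net.SplitHostPort do not trip over the extra colons. A self-contained sketch of just that conversion:

package main

import (
	"fmt"
	"net"
)

// addrString formats a raw IP (4 or 16 bytes, as sent by the balancer)
// plus a port, adding square brackets for IPv6 as the deleted code did.
func addrString(rawIP []byte, port int32) string {
	ip := net.IP(rawIP)
	ipStr := ip.String()
	if ip.To4() == nil {
		ipStr = fmt.Sprintf("[%s]", ipStr)
	}
	return fmt.Sprintf("%s:%d", ipStr, port)
}

func main() {
	fmt.Println(addrString(net.ParseIP("10.0.0.1").To4(), 443))     // 10.0.0.1:443
	fmt.Println(addrString(net.ParseIP("2001:db8::1").To16(), 443)) // [2001:db8::1]:443
}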
diff --git a/vendor/google.golang.org/grpc/grpclb_util.go b/vendor/google.golang.org/grpc/grpclb_util.go
deleted file mode 100644
index 063ba9d..0000000
--- a/vendor/google.golang.org/grpc/grpclb_util.go
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/resolver"
-)
-
-// The parent ClientConn should re-resolve when grpclb loses connection to the
-// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
-// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
-// ResolveNow, and eventually results in re-resolve happening in parent
-// ClientConn's resolver (DNS for example).
-//
-//                          parent
-//                          ClientConn
-//  +-----------------------------------------------------------------+
-//  |             parent          +---------------------------------+ |
-//  | DNS         ClientConn      |  grpclb                         | |
-//  | resolver    balancerWrapper |                                 | |
-//  | +              +            |    grpclb          grpclb       | |
-//  | |              |            |    ManualResolver  ClientConn   | |
-//  | |              |            |     +              +            | |
-//  | |              |            |     |              | Transient  | |
-//  | |              |            |     |              | Failure    | |
-//  | |              |            |     |  <---------  |            | |
-//  | |              | <--------------- |  ResolveNow  |            | |
-//  | |  <---------  | ResolveNow |     |              |            | |
-//  | |  ResolveNow  |            |     |              |            | |
-//  | |              |            |     |              |            | |
-//  | +              +            |     +              +            | |
-//  |                             +---------------------------------+ |
-//  +-----------------------------------------------------------------+
-
-// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
-// resolver with a special ResolveNow() function.
-//
-// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
-// so when grpclb client lose contact with remote balancers, the parent
-// ClientConn's resolver will re-resolve.
-type lbManualResolver struct {
-	scheme string
-	ccr    resolver.ClientConn
-
-	ccb balancer.ClientConn
-}
-
-func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
-	r.ccr = cc
-	return r, nil
-}
-
-func (r *lbManualResolver) Scheme() string {
-	return r.scheme
-}
-
-// ResolveNow calls resolveNow on the parent ClientConn.
-func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) {
-	r.ccb.ResolveNow(o)
-}
-
-// Close is a noop for Resolver.
-func (*lbManualResolver) Close() {}
-
-// NewAddress calls cc.NewAddress.
-func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
-	r.ccr.NewAddress(addrs)
-}
-
-// NewServiceConfig calls cc.NewServiceConfig.
-func (r *lbManualResolver) NewServiceConfig(sc string) {
-	r.ccr.NewServiceConfig(sc)
-}
-
-const subConnCacheTime = time.Second * 10
-
-// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache.
-// SubConns will be kept in cache for subConnCacheTime before being removed.
-//
-// Its new and remove methods are updated to do cache first.
-type lbCacheClientConn struct {
-	cc      balancer.ClientConn
-	timeout time.Duration
-
-	mu sync.Mutex
-	// subConnCache only keeps subConns that are being deleted.
-	subConnCache  map[resolver.Address]*subConnCacheEntry
-	subConnToAddr map[balancer.SubConn]resolver.Address
-}
-
-type subConnCacheEntry struct {
-	sc balancer.SubConn
-
-	cancel        func()
-	abortDeleting bool
-}
-
-func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn {
-	return &lbCacheClientConn{
-		cc:            cc,
-		timeout:       subConnCacheTime,
-		subConnCache:  make(map[resolver.Address]*subConnCacheEntry),
-		subConnToAddr: make(map[balancer.SubConn]resolver.Address),
-	}
-}
-
-func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
-	if len(addrs) != 1 {
-		return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs))
-	}
-	addrWithoutMD := addrs[0]
-	addrWithoutMD.Metadata = nil
-
-	ccc.mu.Lock()
-	defer ccc.mu.Unlock()
-	if entry, ok := ccc.subConnCache[addrWithoutMD]; ok {
-		// If entry is in subConnCache, the SubConn was being deleted.
-		// cancel function will never be nil.
-		entry.cancel()
-		delete(ccc.subConnCache, addrWithoutMD)
-		return entry.sc, nil
-	}
-
-	scNew, err := ccc.cc.NewSubConn(addrs, opts)
-	if err != nil {
-		return nil, err
-	}
-
-	ccc.subConnToAddr[scNew] = addrWithoutMD
-	return scNew, nil
-}
-
-func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) {
-	ccc.mu.Lock()
-	defer ccc.mu.Unlock()
-	addr, ok := ccc.subConnToAddr[sc]
-	if !ok {
-		return
-	}
-
-	if entry, ok := ccc.subConnCache[addr]; ok {
-		if entry.sc != sc {
-			// This could happen if NewSubConn was called multiple times for the
-			// same address, and those SubConns are all removed. We remove sc
-			// immediately here.
-			delete(ccc.subConnToAddr, sc)
-			ccc.cc.RemoveSubConn(sc)
-		}
-		return
-	}
-
-	entry := &subConnCacheEntry{
-		sc: sc,
-	}
-	ccc.subConnCache[addr] = entry
-
-	timer := time.AfterFunc(ccc.timeout, func() {
-		ccc.mu.Lock()
-		if entry.abortDeleting {
-			return
-		}
-		ccc.cc.RemoveSubConn(sc)
-		delete(ccc.subConnToAddr, sc)
-		delete(ccc.subConnCache, addr)
-		ccc.mu.Unlock()
-	})
-	entry.cancel = func() {
-		if !timer.Stop() {
-			// If stop was not successful, the timer has fired (this can only
-			// happen in a race). But the deleting function is blocked on ccc.mu
-			// because the mutex was held by the caller of this function.
-			//
-			// Set abortDeleting to true to abort the deleting function. When
-			// the lock is released, the deleting function will acquire the
-			// lock, check the value of abortDeleting and return.
-			entry.abortDeleting = true
-		}
-	}
-}
-
-func (ccc *lbCacheClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
-	ccc.cc.UpdateBalancerState(s, p)
-}
-
-func (ccc *lbCacheClientConn) close() {
-	ccc.mu.Lock()
-	// Only cancel all existing timers. There's no need to remove SubConns.
-	for _, entry := range ccc.subConnCache {
-		entry.cancel()
-	}
-	ccc.mu.Unlock()
-}
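The SubConn cache above defers the real removal with time.AfterFunc and lets a later NewSubConn cancel it; the abortDeleting flag covers the race where the timer has already fired but is still blocked on the mutex. A generic sketch of that delayed-delete-with-cancel pattern (string keys and the callback are purely illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

type entry struct {
	cancel        func()
	abortDeleting bool
}

type delayedCache struct {
	mu      sync.Mutex
	timeout time.Duration
	pending map[string]*entry
}

// scheduleDelete arms a timer that runs onDelete after timeout unless the
// entry is reused (cancelled) first.
func (c *delayedCache) scheduleDelete(key string, onDelete func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e := &entry{}
	c.pending[key] = e
	timer := time.AfterFunc(c.timeout, func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		if e.abortDeleting {
			return
		}
		delete(c.pending, key)
		onDelete()
	})
	e.cancel = func() {
		if !timer.Stop() {
			// Timer already fired but is waiting on c.mu; tell it to bail out.
			e.abortDeleting = true
		}
	}
}

// reuse cancels a pending delete, mirroring NewSubConn hitting the cache.
func (c *delayedCache) reuse(key string) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.pending[key]; ok {
		e.cancel()
		delete(c.pending, key)
		return true
	}
	return false
}

func main() {
	c := &delayedCache{timeout: 50 * time.Millisecond, pending: map[string]*entry{}}
	c.scheduleDelete("10.0.0.1:443", func() { fmt.Println("removed") })
	fmt.Println(c.reuse("10.0.0.1:443")) // true: the delete was cancelled
	time.Sleep(100 * time.Millisecond)   // nothing else prints
}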
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 1fabb11..51bb945 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,7 +18,7 @@
 
 // Package grpclog defines logging for grpc.
 //
-// All logs in transport package only go to verbose level 2.
+// All logs in transport and grpclb packages only go to verbose level 2.
 // All logs in other packages in grpc are logged in spite of the verbosity level.
 //
 // In the default logger,
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go
new file mode 100644
index 0000000..e15f04c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/client.go
@@ -0,0 +1,107 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package health
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/status"
+)
+
+const maxDelay = 120 * time.Second
+
+var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
+var backoffFunc = func(ctx context.Context, retries int) bool {
+	d := backoffStrategy.Backoff(retries)
+	timer := time.NewTimer(d)
+	select {
+	case <-timer.C:
+		return true
+	case <-ctx.Done():
+		timer.Stop()
+		return false
+	}
+}
+
+func init() {
+	internal.HealthCheckFunc = clientHealthCheck
+}
+
+func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error {
+	tryCnt := 0
+
+retryConnection:
+	for {
+		// Backs off if the connection has failed in some way without receiving a message in the previous retry.
+		if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) {
+			return nil
+		}
+		tryCnt++
+
+		if ctx.Err() != nil {
+			return nil
+		}
+		rawS, err := newStream()
+		if err != nil {
+			continue retryConnection
+		}
+
+		s, ok := rawS.(grpc.ClientStream)
+		// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
+		if !ok {
+			reportHealth(true)
+			return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
+		}
+
+		if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF {
+			// Stream should have been closed, so we can safely continue to create a new stream.
+			continue retryConnection
+		}
+		s.CloseSend()
+
+		resp := new(healthpb.HealthCheckResponse)
+		for {
+			err = s.RecvMsg(resp)
+
+			// Reports healthy for LBing purposes if health check is not implemented in the server.
+			if status.Code(err) == codes.Unimplemented {
+				reportHealth(true)
+				return err
+			}
+
+			// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
+			if err != nil {
+				reportHealth(false)
+				continue retryConnection
+			}
+
+			// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
+			tryCnt = 0
+			reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING)
+		}
+	}
+}
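clientHealthCheck above is hooked up through internal.HealthCheckFunc in init and gates every reconnect on a context-aware exponential backoff. A standalone sketch of that backoff gate with the same timer/select shape (the doubling delay here simulates backoff.Exponential rather than importing the internal package):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForRetry sleeps for an exponentially growing delay, but returns
// false immediately if ctx is cancelled first — the same select the
// health client uses before re-dialing the Watch stream.
func waitForRetry(ctx context.Context, retries int, base, max time.Duration) bool {
	d := base
	for i := 0; i < retries && d < max; i++ {
		d *= 2 // simulated exponential growth, capped at max
	}
	if d > max {
		d = max
	}
	timer := time.NewTimer(d)
	select {
	case <-timer.C:
		return true
	case <-ctx.Done():
		timer.Stop()
		return false
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	for retries := 0; ; retries++ {
		if !waitForRetry(ctx, retries, 50*time.Millisecond, 2*time.Second) {
			fmt.Println("context done after", retries, "retries")
			return
		}
		fmt.Println("retry", retries)
	}
}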
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index e5906de..c2f2c77 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -1,5 +1,5 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc_health_v1/health.proto
+// source: grpc/health/v1/health.proto
 
 package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
 
@@ -26,31 +26,34 @@
 type HealthCheckResponse_ServingStatus int32
 
 const (
-	HealthCheckResponse_UNKNOWN     HealthCheckResponse_ServingStatus = 0
-	HealthCheckResponse_SERVING     HealthCheckResponse_ServingStatus = 1
-	HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
+	HealthCheckResponse_UNKNOWN         HealthCheckResponse_ServingStatus = 0
+	HealthCheckResponse_SERVING         HealthCheckResponse_ServingStatus = 1
+	HealthCheckResponse_NOT_SERVING     HealthCheckResponse_ServingStatus = 2
+	HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3
 )
 
 var HealthCheckResponse_ServingStatus_name = map[int32]string{
 	0: "UNKNOWN",
 	1: "SERVING",
 	2: "NOT_SERVING",
+	3: "SERVICE_UNKNOWN",
 }
 var HealthCheckResponse_ServingStatus_value = map[string]int32{
-	"UNKNOWN":     0,
-	"SERVING":     1,
-	"NOT_SERVING": 2,
+	"UNKNOWN":         0,
+	"SERVING":         1,
+	"NOT_SERVING":     2,
+	"SERVICE_UNKNOWN": 3,
 }
 
 func (x HealthCheckResponse_ServingStatus) String() string {
 	return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
 }
 func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_health_8e5b8a3074428511, []int{1, 0}
+	return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
 }
 
 type HealthCheckRequest struct {
-	Service              string   `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
+	Service              string   `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
@@ -60,7 +63,7 @@
 func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
 func (*HealthCheckRequest) ProtoMessage()    {}
 func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_health_8e5b8a3074428511, []int{0}
+	return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
 }
 func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
@@ -88,7 +91,7 @@
 }
 
 type HealthCheckResponse struct {
-	Status               HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
+	Status               HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
 	XXX_NoUnkeyedLiteral struct{}                          `json:"-"`
 	XXX_unrecognized     []byte                            `json:"-"`
 	XXX_sizecache        int32                             `json:"-"`
@@ -98,7 +101,7 @@
 func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
 func (*HealthCheckResponse) ProtoMessage()    {}
 func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_health_8e5b8a3074428511, []int{1}
+	return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
 }
 func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
@@ -139,10 +142,29 @@
 // is compatible with the grpc package it is being compiled against.
 const _ = grpc.SupportPackageIsVersion4
 
-// Client API for Health service
-
+// HealthClient is the client API for Health service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type HealthClient interface {
+	// If the requested service is unknown, the call will fail with status
+	// NOT_FOUND.
 	Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
+	// Performs a watch for the serving status of the requested service.
+	// The server will immediately send back a message indicating the current
+	// serving status.  It will then subsequently send a new message whenever
+	// the service's serving status changes.
+	//
+	// If the requested service is unknown when the call is received, the
+	// server will send a message setting the serving status to
+	// SERVICE_UNKNOWN but will *not* terminate the call.  If at some
+	// future point, the serving status of the service becomes known, the
+	// server will send a new message with the service's serving status.
+	//
+	// If the call terminates with status UNIMPLEMENTED, then clients
+	// should assume this method is not supported and should not retry the
+	// call.  If the call terminates with any other status (including OK),
+	// clients should retry the call with appropriate exponential backoff.
+	Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
 }
 
 type healthClient struct {
@@ -155,17 +177,66 @@
 
 func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
 	out := new(HealthCheckResponse)
-	err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...)
+	err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
 	return out, nil
 }
 
-// Server API for Health service
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &healthWatchClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
 
+type Health_WatchClient interface {
+	Recv() (*HealthCheckResponse, error)
+	grpc.ClientStream
+}
+
+type healthWatchClient struct {
+	grpc.ClientStream
+}
+
+func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
+	m := new(HealthCheckResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// HealthServer is the server API for Health service.
 type HealthServer interface {
+	// If the requested service is unknown, the call will fail with status
+	// NOT_FOUND.
 	Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
+	// Performs a watch for the serving status of the requested service.
+	// The server will immediately send back a message indicating the current
+	// serving status.  It will then subsequently send a new message whenever
+	// the service's serving status changes.
+	//
+	// If the requested service is unknown when the call is received, the
+	// server will send a message setting the serving status to
+	// SERVICE_UNKNOWN but will *not* terminate the call.  If at some
+	// future point, the serving status of the service becomes known, the
+	// server will send a new message with the service's serving status.
+	//
+	// If the call terminates with status UNIMPLEMENTED, then clients
+	// should assume this method is not supported and should not retry the
+	// call.  If the call terminates with any other status (including OK),
+	// clients should retry the call with appropriate exponential backoff.
+	Watch(*HealthCheckRequest, Health_WatchServer) error
 }
 
 func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
@@ -190,6 +261,27 @@
 	return interceptor(ctx, in, info, handler)
 }
 
+func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(HealthCheckRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
+}
+
+type Health_WatchServer interface {
+	Send(*HealthCheckResponse) error
+	grpc.ServerStream
+}
+
+type healthWatchServer struct {
+	grpc.ServerStream
+}
+
+func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
 var _Health_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "grpc.health.v1.Health",
 	HandlerType: (*HealthServer)(nil),
@@ -199,29 +291,37 @@
 			Handler:    _Health_Check_Handler,
 		},
 	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "grpc_health_v1/health.proto",
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Watch",
+			Handler:       _Health_Watch_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "grpc/health/v1/health.proto",
 }
 
-func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor_health_8e5b8a3074428511) }
+func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
 
-var fileDescriptor_health_8e5b8a3074428511 = []byte{
-	// 269 bytes of a gzipped FileDescriptorProto
+var fileDescriptor_health_6b1a06aa67f91efd = []byte{
+	// 297 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
-	0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a,
-	0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21,
-	0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48,
-	0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33,
-	0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55,
-	0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f,
-	0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41,
-	0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3,
-	0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85,
-	0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b,
-	0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44,
-	0xb8, 0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28,
-	0x70, 0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12,
-	0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x41, 0x1a, 0xa0, 0x71, 0xa0, 0x8f, 0x1a, 0x33, 0xab,
-	0x98, 0xf8, 0xdc, 0x41, 0xa6, 0x41, 0x8c, 0xd0, 0x0b, 0x33, 0x4c, 0x62, 0x03, 0x47, 0x92, 0x31,
-	0x20, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x70, 0xc4, 0xa7, 0xc3, 0x01, 0x00, 0x00,
+	0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
+	0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
+	0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
+	0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
+	0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
+	0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
+	0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
+	0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
+	0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
+	0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
+	0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
+	0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
+	0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
+	0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
+	0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
+	0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
+	0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
+	0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
 }
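The regenerated stubs above add the streaming Watch RPC next to Check. A hedged usage sketch of the client side; the endpoint and service name are placeholders, and the loop simply logs each status update until the stream ends:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// watchHealth subscribes to serving-status changes for service and logs
// each update until the stream or context ends.
func watchHealth(ctx context.Context, conn *grpc.ClientConn, service string) error {
	client := healthpb.NewHealthClient(conn)
	stream, err := client.Watch(ctx, &healthpb.HealthCheckRequest{Service: service})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err // io.EOF or a gRPC status once the stream ends
		}
		log.Printf("health of %q: %v", service, resp.Status)
	}
}

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure()) // assumed endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	_ = watchHealth(context.Background(), conn, "my.package.MyService")
}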
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto
deleted file mode 100644
index bcc02f8..0000000
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2015, gRPC Authors
-// All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The canonical version of this proto can be found at
-// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
-
-syntax = "proto3";
-
-package grpc.health.v1;
-
-option csharp_namespace = "Grpc.Health.V1";
-option go_package = "google.golang.org/grpc/health/grpc_health_v1";
-option java_multiple_files = true;
-option java_outer_classname = "HealthProto";
-option java_package = "io.grpc.health.v1";
-
-message HealthCheckRequest {
-  string service = 1;
-}
-
-message HealthCheckResponse {
-  enum ServingStatus {
-    UNKNOWN = 0;
-    SERVING = 1;
-    NOT_SERVING = 2;
-  }
-  ServingStatus status = 1;
-}
-
-service Health {
-  rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
-}
diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go
deleted file mode 100644
index de7f9ba..0000000
--- a/vendor/google.golang.org/grpc/health/health.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-//go:generate protoc --go_out=plugins=grpc,paths=source_relative:. grpc_health_v1/health.proto
-
-// Package health provides some utility functions to health-check a server. The implementation
-// is based on protobuf. Users need to write their own implementations if other IDLs are used.
-package health
-
-import (
-	"sync"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	healthpb "google.golang.org/grpc/health/grpc_health_v1"
-	"google.golang.org/grpc/status"
-)
-
-// Server implements `service Health`.
-type Server struct {
-	mu sync.Mutex
-	// statusMap stores the serving status of the services this Server monitors.
-	statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
-}
-
-// NewServer returns a new Server.
-func NewServer() *Server {
-	return &Server{
-		statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus),
-	}
-}
-
-// Check implements `service Health`.
-func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if in.Service == "" {
-		// check the server overall health status.
-		return &healthpb.HealthCheckResponse{
-			Status: healthpb.HealthCheckResponse_SERVING,
-		}, nil
-	}
-	if status, ok := s.statusMap[in.Service]; ok {
-		return &healthpb.HealthCheckResponse{
-			Status: status,
-		}, nil
-	}
-	return nil, status.Error(codes.NotFound, "unknown service")
-}
-
-// SetServingStatus is called when need to reset the serving status of a service
-// or insert a new service entry into the statusMap.
-func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
-	s.mu.Lock()
-	s.statusMap[service] = status
-	s.mu.Unlock()
-}
diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go
new file mode 100644
index 0000000..c79f9d2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/server.go
@@ -0,0 +1,165 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate ./regenerate.sh
+
+// Package health provides a service that exposes the server's health, and must
+// be imported to enable support for client-side health checks.
+package health
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/status"
+)
+
+// Server implements `service Health`.
+type Server struct {
+	mu sync.Mutex
+	// If shutdown is true, every serving status is expected to be NOT_SERVING,
+	// and will stay NOT_SERVING.
+	shutdown bool
+	// statusMap stores the serving status of the services this Server monitors.
+	statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
+	updates   map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus
+}
+
+// NewServer returns a new Server.
+func NewServer() *Server {
+	return &Server{
+		statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
+		updates:   make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
+	}
+}
+
+// Check implements `service Health`.
+func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if servingStatus, ok := s.statusMap[in.Service]; ok {
+		return &healthpb.HealthCheckResponse{
+			Status: servingStatus,
+		}, nil
+	}
+	return nil, status.Error(codes.NotFound, "unknown service")
+}
+
+// Watch implements `service Health`.
+func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
+	service := in.Service
+	// update channel is used for getting service status updates.
+	update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
+	s.mu.Lock()
+	// Puts the initial status to the channel.
+	if servingStatus, ok := s.statusMap[service]; ok {
+		update <- servingStatus
+	} else {
+		update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
+	}
+
+	// Registers the update channel to the correct place in the updates map.
+	if _, ok := s.updates[service]; !ok {
+		s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus)
+	}
+	s.updates[service][stream] = update
+	defer func() {
+		s.mu.Lock()
+		delete(s.updates[service], stream)
+		s.mu.Unlock()
+	}()
+	s.mu.Unlock()
+
+	var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1
+	for {
+		select {
+		// Status updated. Sends the up-to-date status to the client.
+		case servingStatus := <-update:
+			if lastSentStatus == servingStatus {
+				continue
+			}
+			lastSentStatus = servingStatus
+			err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus})
+			if err != nil {
+				return status.Error(codes.Canceled, "Stream has ended.")
+			}
+		// Context done. Removes the update channel from the updates map.
+		case <-stream.Context().Done():
+			return status.Error(codes.Canceled, "Stream has ended.")
+		}
+	}
+}
+
+// SetServingStatus is called when the serving status of a service needs to be
+// reset, or when a new service entry needs to be inserted into the statusMap.
+func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.shutdown {
+		grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
+		return
+	}
+
+	s.setServingStatusLocked(service, servingStatus)
+}
+
+func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.statusMap[service] = servingStatus
+	for _, update := range s.updates[service] {
+		// Clears previous updates, that are not sent to the client, from the channel.
+		// This can happen if the client is not reading and the server gets flow control limited.
+		select {
+		case <-update:
+		default:
+		}
+		// Puts the most recent update to the channel.
+		update <- servingStatus
+	}
+}
+
+// Shutdown sets all serving status to NOT_SERVING, and configures the server to
+// ignore all future status changes.
+//
+// This changes the serving status for all services. To set the status for a
+// particular service, call SetServingStatus().
+func (s *Server) Shutdown() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.shutdown = true
+	for service := range s.statusMap {
+		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING)
+	}
+}
+
+// Resume sets all serving status to SERVING, and configures the server to
+// accept all future status changes.
+//
+// This changes the serving status for all services. To set the status for a
+// particular service, call SetServingStatus().
+func (s *Server) Resume() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.shutdown = false
+	for service := range s.statusMap {
+		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING)
+	}
+}
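
To make the health service above useful, it has to be registered on a gRPC server and its per-service statuses kept up to date. A minimal usage sketch follows; the service name "my.package.MyService" and the listen address are placeholders, not part of this change:

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()

	// NewServer already marks "" (the overall server) as SERVING.
	h := health.NewServer()
	healthpb.RegisterHealthServer(s, h)

	// Mark a hypothetical service as serving; switch it to NOT_SERVING (or
	// call h.Shutdown()) when its backend becomes unavailable.
	h.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING)

	log.Fatal(s.Serve(lis))
}
```

Clients that enable health checking in their service config then follow these status changes through the Watch stream implemented above.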
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
index 1f6ef67..8b73500 100644
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -19,7 +19,7 @@
 package grpc
 
 import (
-	"golang.org/x/net/context"
+	"context"
 )
 
 // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 0000000..1bd0cce
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,78 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+//
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+const (
+	// baseDelay is the amount of time to wait before retrying after the first
+	// failure.
+	baseDelay = 1.0 * time.Second
+	// factor is applied to the backoff after each retry.
+	factor = 1.6
+	// jitter provides a range to randomize backoff delays.
+	jitter = 0.2
+)
+
+// Exponential implements the exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return baseDelay
+	}
+	backoff, max := float64(baseDelay), float64(bc.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= factor
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
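
Since this package lives under internal/, it cannot be imported from outside the grpc module; the schedule it produces is easiest to see with a standalone sketch that mirrors the same constants and formula (1s base delay, 1.6x growth, ±20% jitter, capped at MaxDelay):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// expBackoff mirrors Exponential.Backoff with the constants defined above.
func expBackoff(retries int, maxDelay time.Duration) time.Duration {
	const (
		baseDelay = float64(time.Second)
		factor    = 1.6
		jitter    = 0.2
	)
	if retries == 0 {
		return time.Duration(baseDelay)
	}
	backoff, max := baseDelay, float64(maxDelay)
	for backoff < max && retries > 0 {
		backoff *= factor
		retries--
	}
	if backoff > max {
		backoff = max
	}
	backoff *= 1 + jitter*(rand.Float64()*2-1) // randomize to avoid lockstep retries
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff)
}

func main() {
	// Roughly: 1s, ~1.6s, ~2.56s, ~4.1s, ~6.55s (jitter shifts each value by up to 20%).
	for i := 0; i < 5; i++ {
		fmt.Printf("retry %d: wait ~%v\n", i, expBackoff(i, 2*time.Minute).Round(time.Millisecond))
	}
}
```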
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 0000000..3a905d9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) interface{}
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse().
+func Parse(md metadata.MD) interface{} {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
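
The package is internal, so SetParser can only be called from inside the grpc module itself; still, the Parser contract is easy to illustrate with a standalone sketch that pulls a load value out of a trailer key of our own invention ("x-server-load" is hypothetical, not a gRPC convention):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

// serverLoadParser is an illustrative Parser implementation: it extracts a
// single value from a hypothetical trailer key.
type serverLoadParser struct{}

func (serverLoadParser) Parse(md metadata.MD) interface{} {
	vals := md.Get("x-server-load") // hypothetical trailer key
	if len(vals) == 0 {
		return nil
	}
	return vals[0]
}

func main() {
	md := metadata.Pairs("x-server-load", "0.72")
	fmt.Println(serverLoadParser{}.Parse(md)) // 0.72
}
```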
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 0000000..fee6aec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
+package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// Logger is the global binary logger. It can be used to get a binary logger
+// for each method.
+type Logger interface {
+	getMethodLogger(methodName string) *MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. One of these should be
+// built at init time from the configuration (environment variable or flags).
+//
+// It is used to get a methodLogger for each individual method.
+var binLogger Logger
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) {
+	binLogger = l
+}
+
+// GetMethodLogger returns the methodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each methodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func GetMethodLogger(methodName string) *MethodLogger {
+	if binLogger == nil {
+		return nil
+	}
+	return binLogger.getMethodLogger(methodName)
+}
+
+func init() {
+	const envStr = "GRPC_BINARY_LOG_FILTER"
+	configStr := os.Getenv(envStr)
+	binLogger = NewLoggerFromConfigString(configStr)
+}
+
+type methodLoggerConfig struct {
+	// Max length of header and message.
+	hdr, msg uint64
+}
+
+type logger struct {
+	all      *methodLoggerConfig
+	services map[string]*methodLoggerConfig
+	methods  map[string]*methodLoggerConfig
+
+	blacklist map[string]struct{}
+}
+
+// newEmptyLogger creates an empty logger. The map fields need to be filled in
+// using the set* functions.
+func newEmptyLogger() *logger {
+	return &logger{}
+}
+
+// Set method logger for "*".
+func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
+	if l.all != nil {
+		return fmt.Errorf("conflicting global rules found")
+	}
+	l.all = ml
+	return nil
+}
+
+// Set method logger for "service/*".
+//
+// New methodLogger with same service overrides the old one.
+func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
+	if _, ok := l.services[service]; ok {
+		return fmt.Errorf("conflicting rules for service %v found", service)
+	}
+	if l.services == nil {
+		l.services = make(map[string]*methodLoggerConfig)
+	}
+	l.services[service] = ml
+	return nil
+}
+
+// Set method logger for "service/method".
+//
+// New methodLogger with same method overrides the old one.
+func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
+	if _, ok := l.blacklist[method]; ok {
+		return fmt.Errorf("conflicting rules for method %v found", method)
+	}
+	if _, ok := l.methods[method]; ok {
+		return fmt.Errorf("conflicting rules for method %v found", method)
+	}
+	if l.methods == nil {
+		l.methods = make(map[string]*methodLoggerConfig)
+	}
+	l.methods[method] = ml
+	return nil
+}
+
+// Set blacklist method for "-service/method".
+func (l *logger) setBlacklist(method string) error {
+	if _, ok := l.blacklist[method]; ok {
+		return fmt.Errorf("conflicting rules for method %v found", method)
+	}
+	if _, ok := l.methods[method]; ok {
+		return fmt.Errorf("conflicting rules for method %v found", method)
+	}
+	if l.blacklist == nil {
+		l.blacklist = make(map[string]struct{})
+	}
+	l.blacklist[method] = struct{}{}
+	return nil
+}
+
+// getMethodLogger returns the methodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each methodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func (l *logger) getMethodLogger(methodName string) *MethodLogger {
+	s, m, err := parseMethodName(methodName)
+	if err != nil {
+		grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		return nil
+	}
+	if ml, ok := l.methods[s+"/"+m]; ok {
+		return newMethodLogger(ml.hdr, ml.msg)
+	}
+	if _, ok := l.blacklist[s+"/"+m]; ok {
+		return nil
+	}
+	if ml, ok := l.services[s]; ok {
+		return newMethodLogger(ml.hdr, ml.msg)
+	}
+	if l.all == nil {
+		return nil
+	}
+	return newMethodLogger(l.all.hdr, l.all.msg)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 0000000..1ee00a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains variables/functions that are exported for testing purposes
+// only.
+//
+// Ideally these would live in a *_test.go file within the binarylog package,
+// but that doesn't work with staticcheck under Go modules. The error was:
+// "MdToMetadataProto not declared by package binarylog". This could be caused
+// by the way staticcheck looks up files for a certain package, which doesn't
+// support *_test.go files.
+//
+// Move those to binary_test.go when staticcheck is fixed.
+
+package binarylog
+
+var (
+	// AllLogger is a logger that logs all headers/messages for all RPCs. It's
+	// for testing only.
+	AllLogger = NewLoggerFromConfigString("*")
+	// MdToMetadataProto converts metadata to a binary logging proto message.
+	// It's for testing only.
+	MdToMetadataProto = mdToMetadataProto
+	// AddrToProto converts an address to a binary logging proto message. It's
+	// for testing only.
+	AddrToProto = addrToProto
+)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 0000000..4cc2525
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,210 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// NewLoggerFromConfigString reads the string and builds a logger. It can be
+// used to build a new logger to be set via SetLogger.
+//
+// Example filter config strings:
+//  - "" Nothing will be logged
+//  - "*" All headers and messages will be fully logged.
+//  - "*{h}" Only headers will be logged.
+//  - "*{m:256}" Only the first 256 bytes of each message will be logged.
+//  - "Foo/*" Logs every method in service Foo
+//  - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+//  - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+//    /Foo/Bar, logs all headers and messages in every other method in service
+//    Foo.
+//
+// If two configs exist for a certain method or service, the one specified
+// later overrides the previous config.
+func NewLoggerFromConfigString(s string) Logger {
+	if s == "" {
+		return nil
+	}
+	l := newEmptyLogger()
+	methods := strings.Split(s, ",")
+	for _, method := range methods {
+		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
+			grpclog.Warningf("failed to parse binary log config: %v", err)
+			return nil
+		}
+	}
+	return l
+}
+
+// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
+// it to the right map in the logger.
+func (l *logger) fillMethodLoggerWithConfigString(config string) error {
+	// "" is invalid.
+	if config == "" {
+		return errors.New("empty string is not a valid method binary logging config")
+	}
+
+	// "-service/method", blacklist, no * or {} allowed.
+	if config[0] == '-' {
+		s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if m == "*" {
+			return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config")
+		}
+		if suffix != "" {
+			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
+		}
+		if err := l.setBlacklist(s + "/" + m); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	// "*{h:256;m:256}"
+	if config[0] == '*' {
+		hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	s, m, suffix, err := parseMethodConfigAndSuffix(config)
+	if err != nil {
+		return fmt.Errorf("invalid config: %q, %v", config, err)
+	}
+	hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
+	if err != nil {
+		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
+	}
+	if m == "*" {
+		if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	} else {
+		if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	}
+	return nil
+}
+
+const (
+	// TODO: this const is only used by env_config now. But could be useful for
+	// other config. Move to binarylog.go if necessary.
+	maxUInt = ^uint64(0)
+
+	// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
+	// expected output.
+	longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
+
+	// For suffix from above, "{h:123,m:123}". See test for expected output.
+	optionalLengthRegexpStr      = `(?::(\d+))?` // Optional ":123".
+	headerConfigRegexpStr        = `^{h` + optionalLengthRegexpStr + `}$`
+	messageConfigRegexpStr       = `^{m` + optionalLengthRegexpStr + `}$`
+	headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
+)
+
+var (
+	longMethodConfigRegexp    = regexp.MustCompile(longMethodConfigRegexpStr)
+	headerConfigRegexp        = regexp.MustCompile(headerConfigRegexpStr)
+	messageConfigRegexp       = regexp.MustCompile(messageConfigRegexpStr)
+	headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
+)
+
+// Turn "service/method{h;m}" into "service", "method", "{h;m}".
+func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
+	// Regexp result:
+	//
+	// in:  "p.s/m{h:123,m:123}",
+	// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
+	match := longMethodConfigRegexp.FindStringSubmatch(c)
+	if match == nil {
+		return "", "", "", fmt.Errorf("%q contains invalid substring", c)
+	}
+	service = match[1]
+	method = match[2]
+	suffix = match[3]
+	return
+}
+
+// Turn "{h:123;m:345}" into 123, 345.
+//
+// Return maxUInt if length is unspecified.
+func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
+	if c == "" {
+		return maxUInt, maxUInt, nil
+	}
+	// Header config only.
+	if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
+		if s := match[1]; s != "" {
+			hdrLenStr, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+			return hdrLenStr, 0, nil
+		}
+		return maxUInt, 0, nil
+	}
+
+	// Message config only.
+	if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
+		if s := match[1]; s != "" {
+			msgLenStr, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+			return 0, msgLenStr, nil
+		}
+		return 0, maxUInt, nil
+	}
+
+	// Header and message config both.
+	if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
+		// Both hdr and msg are specified, but one or two of them might be empty.
+		hdrLenStr = maxUInt
+		msgLenStr = maxUInt
+		if s := match[1]; s != "" {
+			hdrLenStr, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+		}
+		if s := match[2]; s != "" {
+			msgLenStr, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+		}
+		return hdrLenStr, msgLenStr, nil
+	}
+	return 0, 0, fmt.Errorf("%q contains invalid substring", c)
+}
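
In practice the config string arrives through the GRPC_BINARY_LOG_FILTER environment variable read at init time. To see how a single comma-separated item is split into service, method, and length suffix, here is a standalone sketch that runs the same pattern as longMethodConfigRegexpStr against a few of the example strings above:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as longMethodConfigRegexpStr.
	re := regexp.MustCompile(`^([\w./]+)/((?:\w+)|[*])(.+)?$`)
	for _, cfg := range []string{"p.s/m{h:123;m:456}", "Foo/*", "Foo/Bar{m:256}"} {
		m := re.FindStringSubmatch(cfg)
		fmt.Printf("%-20s -> service=%q method=%q suffix=%q\n", cfg, m[1], m[2], m[3])
	}
}
```

The suffix, if present, is then handed to parseHeaderMessageLengthConfig, which turns "{h:123;m:456}" into the two byte limits.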
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
new file mode 100644
index 0000000..160f6e8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -0,0 +1,423 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+type callIDGenerator struct {
+	id uint64
+}
+
+func (g *callIDGenerator) next() uint64 {
+	id := atomic.AddUint64(&g.id, 1)
+	return id
+}
+
+// reset is for testing only, and doesn't need to be thread safe.
+func (g *callIDGenerator) reset() {
+	g.id = 0
+}
+
+var idGen callIDGenerator
+
+// MethodLogger is the sub-logger for each method.
+type MethodLogger struct {
+	headerMaxLen, messageMaxLen uint64
+
+	callID          uint64
+	idWithinCallGen *callIDGenerator
+
+	sink Sink // TODO(blog): make this plugable.
+}
+
+func newMethodLogger(h, m uint64) *MethodLogger {
+	return &MethodLogger{
+		headerMaxLen:  h,
+		messageMaxLen: m,
+
+		callID:          idGen.next(),
+		idWithinCallGen: &callIDGenerator{},
+
+		sink: defaultSink, // TODO(blog): make it plugable.
+	}
+}
+
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *MethodLogger) Log(c LogEntryConfig) {
+	m := c.toProto()
+	timestamp, _ := ptypes.TimestampProto(time.Now())
+	m.Timestamp = timestamp
+	m.CallId = ml.callID
+	m.SequenceIdWithinCall = ml.idWithinCallGen.next()
+
+	switch pay := m.Payload.(type) {
+	case *pb.GrpcLogEntry_ClientHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
+	case *pb.GrpcLogEntry_ServerHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
+	case *pb.GrpcLogEntry_Message:
+		m.PayloadTruncated = ml.truncateMessage(pay.Message)
+	}
+
+	ml.sink.Write(m)
+}
+
+func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+	if ml.headerMaxLen == maxUInt {
+		return false
+	}
+	var (
+		bytesLimit = ml.headerMaxLen
+		index      int
+	)
+	// At the end of the loop, index will be the first entry where the total
+	// size is greater than the limit:
+	//
+	// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
+	for ; index < len(mdPb.Entry); index++ {
+		entry := mdPb.Entry[index]
+		if entry.Key == "grpc-trace-bin" {
+			// "grpc-trace-bin" is a special key. It's kept in the log entry,
+			// but not counted towards the size limit.
+			continue
+		}
+		currentEntryLen := uint64(len(entry.Value))
+		if currentEntryLen > bytesLimit {
+			break
+		}
+		bytesLimit -= currentEntryLen
+	}
+	truncated = index < len(mdPb.Entry)
+	mdPb.Entry = mdPb.Entry[:index]
+	return truncated
+}
+
+func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+	if ml.messageMaxLen == maxUInt {
+		return false
+	}
+	if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
+		return false
+	}
+	msgPb.Data = msgPb.Data[:ml.messageMaxLen]
+	return true
+}
+
+// LogEntryConfig represents the configuration for binary log entry.
+type LogEntryConfig interface {
+	toProto() *pb.GrpcLogEntry
+}
+
+// ClientHeader configs the binary log entry to be a ClientHeader entry.
+type ClientHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	MethodName   string
+	Authority    string
+	Timeout      time.Duration
+	// PeerAddr is required only when it's on server side.
+	PeerAddr net.Addr
+}
+
+func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
+	// This function doesn't need to set all the fields (e.g. seq ID). The Log
+	// function will set the fields when necessary.
+	clientHeader := &pb.ClientHeader{
+		Metadata:   mdToMetadataProto(c.Header),
+		MethodName: c.MethodName,
+		Authority:  c.Authority,
+	}
+	if c.Timeout > 0 {
+		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+		Payload: &pb.GrpcLogEntry_ClientHeader{
+			ClientHeader: clientHeader,
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ServerHeader configs the binary log entry to be a ServerHeader entry.
+type ServerHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	// PeerAddr is required only when it's on client side.
+	PeerAddr net.Addr
+}
+
+func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+		Payload: &pb.GrpcLogEntry_ServerHeader{
+			ServerHeader: &pb.ServerHeader{
+				Metadata: mdToMetadataProto(c.Header),
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ClientMessage configs the binary log entry to be a ClientMessage entry.
+type ClientMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+		Payload: &pb.GrpcLogEntry_Message{
+			Message: &pb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerMessage configs the binary log entry to be a ServerMessage entry.
+type ServerMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+		Payload: &pb.GrpcLogEntry_Message{
+			Message: &pb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
+type ClientHalfClose struct {
+	OnClientSide bool
+}
+
+func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type:    pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+		Payload: nil, // No payload here.
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
+type ServerTrailer struct {
+	OnClientSide bool
+	Trailer      metadata.MD
+	// Err is the status error.
+	Err error
+	// PeerAddr is required only when it's on client side and the RPC is trailer
+	// only.
+	PeerAddr net.Addr
+}
+
+func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
+	st, ok := status.FromError(c.Err)
+	if !ok {
+		grpclog.Info("binarylogging: error in trailer is not a status error")
+	}
+	var (
+		detailsBytes []byte
+		err          error
+	)
+	stProto := st.Proto()
+	if stProto != nil && len(stProto.Details) != 0 {
+		detailsBytes, err = proto.Marshal(stProto)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+		}
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+		Payload: &pb.GrpcLogEntry_Trailer{
+			Trailer: &pb.Trailer{
+				Metadata:      mdToMetadataProto(c.Trailer),
+				StatusCode:    uint32(st.Code()),
+				StatusMessage: st.Message(),
+				StatusDetails: detailsBytes,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// Cancel configs the binary log entry to be a Cancel entry.
+type Cancel struct {
+	OnClientSide bool
+}
+
+func (c *Cancel) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type:    pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+		Payload: nil,
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// metadataKeyOmit returns whether the metadata entry with this key should be
+// omitted.
+func metadataKeyOmit(key string) bool {
+	switch key {
+	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
+		return true
+	case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
+		return false
+	}
+	return strings.HasPrefix(key, "grpc-")
+}
+
+func mdToMetadataProto(md metadata.MD) *pb.Metadata {
+	ret := &pb.Metadata{}
+	for k, vv := range md {
+		if metadataKeyOmit(k) {
+			continue
+		}
+		for _, v := range vv {
+			ret.Entry = append(ret.Entry,
+				&pb.MetadataEntry{
+					Key:   k,
+					Value: []byte(v),
+				},
+			)
+		}
+	}
+	return ret
+}
+
+func addrToProto(addr net.Addr) *pb.Address {
+	ret := &pb.Address{}
+	switch a := addr.(type) {
+	case *net.TCPAddr:
+		if a.IP.To4() != nil {
+			ret.Type = pb.Address_TYPE_IPV4
+		} else if a.IP.To16() != nil {
+			ret.Type = pb.Address_TYPE_IPV6
+		} else {
+			ret.Type = pb.Address_TYPE_UNKNOWN
+			// Do not set address and port fields.
+			break
+		}
+		ret.Address = a.IP.String()
+		ret.IpPort = uint32(a.Port)
+	case *net.UnixAddr:
+		ret.Type = pb.Address_TYPE_UNIX
+		ret.Address = a.String()
+	default:
+		ret.Type = pb.Address_TYPE_UNKNOWN
+	}
+	return ret
+}
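
Because the package is internal, only grpc's own client and server code can construct these entries; the sketch below is illustrative only (it would not compile outside the grpc module) and shows roughly how a MethodLogger might be driven for one RPC, using a placeholder method name:

```go
package sketch // illustrative only: internal/binarylog cannot be imported externally

import (
	"time"

	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/metadata"
)

// logClientCall logs the client-side header and half-close events for one RPC.
func logClientCall() {
	ml := binarylog.GetMethodLogger("/helloworld.Greeter/SayHello")
	if ml == nil {
		return // binary logging is not enabled for this method
	}
	ml.Log(&binarylog.ClientHeader{
		OnClientSide: true,
		Header:       metadata.Pairs("k", "v"),
		MethodName:   "/helloworld.Greeter/SayHello",
		Authority:    "example.com:443",
		Timeout:      time.Second,
	})
	ml.Log(&binarylog.ClientHalfClose{OnClientSide: true})
}
```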
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 0000000..20d044f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,162 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+)
+
+// SetDefaultSink sets the sink where binary logs will be written to.
+//
+// Not thread safe. Only set during initialization.
+func SetDefaultSink(s Sink) {
+	if defaultSink != nil {
+		defaultSink.Close()
+	}
+	defaultSink = s
+}
+
+// Sink writes log entry into the binary log sink.
+type Sink interface {
+	// Write will be called to write the log entry into the sink.
+	//
+	// It should be thread-safe so it can be called in parallel.
+	Write(*pb.GrpcLogEntry) error
+	// Close will be called when the Sink is replaced by a new Sink.
+	Close() error
+}
+
+type noopSink struct{}
+
+func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error                 { return nil }
+
+// newWriterSink creates a binary log sink with the given writer.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4-byte big-endian unsigned integer as its length.
+//
+// No buffering is done; Close() doesn't try to close the writer.
+func newWriterSink(w io.Writer) *writerSink {
+	return &writerSink{out: w}
+}
+
+type writerSink struct {
+	out io.Writer
+}
+
+func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
+	b, err := proto.Marshal(e)
+	if err != nil {
+		grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+	}
+	hdr := make([]byte, 4)
+	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
+	if _, err := ws.out.Write(hdr); err != nil {
+		return err
+	}
+	if _, err := ws.out.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ws *writerSink) Close() error { return nil }
+
+type bufWriteCloserSink struct {
+	mu     sync.Mutex
+	closer io.Closer
+	out    *writerSink   // out is built on buf.
+	buf    *bufio.Writer // buf is kept for flush.
+
+	writeStartOnce sync.Once
+	writeTicker    *time.Ticker
+}
+
+func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
+	// Start the write loop when Write is called.
+	fs.writeStartOnce.Do(fs.startFlushGoroutine)
+	fs.mu.Lock()
+	if err := fs.out.Write(e); err != nil {
+		fs.mu.Unlock()
+		return err
+	}
+	fs.mu.Unlock()
+	return nil
+}
+
+const (
+	bufFlushDuration = 60 * time.Second
+)
+
+func (fs *bufWriteCloserSink) startFlushGoroutine() {
+	fs.writeTicker = time.NewTicker(bufFlushDuration)
+	go func() {
+		for range fs.writeTicker.C {
+			fs.mu.Lock()
+			fs.buf.Flush()
+			fs.mu.Unlock()
+		}
+	}()
+}
+
+func (fs *bufWriteCloserSink) Close() error {
+	if fs.writeTicker != nil {
+		fs.writeTicker.Stop()
+	}
+	fs.mu.Lock()
+	fs.buf.Flush()
+	fs.closer.Close()
+	fs.out.Close()
+	fs.mu.Unlock()
+	return nil
+}
+
+func newBufWriteCloserSink(o io.WriteCloser) Sink {
+	bufW := bufio.NewWriter(o)
+	return &bufWriteCloserSink{
+		closer: o,
+		out:    newWriterSink(bufW),
+		buf:    bufW,
+	}
+}
+
+// NewTempFileSink creates a temp file and returns a Sink that writes to this
+// file.
+func NewTempFileSink() (Sink, error) {
+	tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create temp file: %v", err)
+	}
+	return newBufWriteCloserSink(tempFile), nil
+}
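
The format written by writerSink is a 4-byte big-endian length followed by a marshalled GrpcLogEntry, repeated. A small standalone reader for such a file (the path is a placeholder for whatever NewTempFileSink produced):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

// readEntries decodes the length-prefixed stream produced by writerSink.
func readEntries(r io.Reader) error {
	hdr := make([]byte, 4)
	for {
		if _, err := io.ReadFull(r, hdr); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		buf := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(r, buf); err != nil {
			return err
		}
		e := &pb.GrpcLogEntry{}
		if err := proto.Unmarshal(buf, e); err != nil {
			return err
		}
		fmt.Printf("call %d seq %d type %v\n", e.GetCallId(), e.GetSequenceIdWithinCall(), e.GetType())
	}
}

func main() {
	f, err := os.Open("/tmp/grpcgo_binarylog_example.txt") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := readEntries(f); err != nil {
		panic(err)
	}
}
```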
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
new file mode 100644
index 0000000..15dc780
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/util.go
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"errors"
+	"strings"
+)
+
+// parseMethodName splits service and method from the input. It expects format
+// "/service/method".
+//
+// TODO: move to internal/grpcutil.
+func parseMethodName(methodName string) (service, method string, _ error) {
+	if !strings.HasPrefix(methodName, "/") {
+		return "", "", errors.New("invalid method name: should start with /")
+	}
+	methodName = methodName[1:]
+
+	pos := strings.LastIndex(methodName, "/")
+	if pos < 0 {
+		return "", "", errors.New("invalid method name: suffix /method is missing")
+	}
+	return methodName[:pos], methodName[pos+1:], nil
+}
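
A quick standalone illustration of the split parseMethodName performs:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// "/helloworld.Greeter/SayHello" -> service "helloworld.Greeter", method "SayHello".
	name := strings.TrimPrefix("/helloworld.Greeter/SayHello", "/")
	pos := strings.LastIndex(name, "/")
	fmt.Println(name[:pos], name[pos+1:])
}
```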
diff --git a/vendor/google.golang.org/grpc/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
similarity index 70%
rename from vendor/google.golang.org/grpc/channelz/funcs.go
rename to vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 586a033..041520d 100644
--- a/vendor/google.golang.org/grpc/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -27,16 +27,22 @@
 	"sort"
 	"sync"
 	"sync/atomic"
+	"time"
 
 	"google.golang.org/grpc/grpclog"
 )
 
+const (
+	defaultMaxTraceEntry int32 = 30
+)
+
 var (
 	db    dbWrapper
 	idGen idGenerator
 	// EntryPerPage defines the number of channelz entries to be shown on a web page.
-	EntryPerPage = 50
-	curState     int32
+	EntryPerPage  = int64(50)
+	curState      int32
+	maxTraceEntry = defaultMaxTraceEntry
 )
 
 // TurnOn turns on channelz data collection.
@@ -52,6 +58,22 @@
 	return atomic.CompareAndSwapInt32(&curState, 1, 1)
 }
 
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
+// Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+	atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
+func ResetMaxTraceEntryToDefault() {
+	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+	i := atomic.LoadInt32(&maxTraceEntry)
+	return int(i)
+}
+
 // dbWarpper wraps around a reference to internal channelz data storage, and
 // provide synchronized functionality to set and get the reference.
 type dbWrapper struct {
@@ -91,20 +113,20 @@
 // boolean indicating whether there's more top channels to be queried for.
 //
 // The arg id specifies that only top channel with id at or above it will be included
-// in the result. The returned slice is up to a length of EntryPerPage, and is
-// sorted in ascending id order.
-func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
-	return db.get().GetTopChannels(id)
+// in the result. The returned slice is up to a length of the arg maxResults or
+// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+	return db.get().GetTopChannels(id, maxResults)
 }
 
 // GetServers returns a slice of server's ServerMetric, along with a
 // boolean indicating whether there's more servers to be queried for.
 //
 // The arg id specifies that only server with id at or above it will be included
-// in the result. The returned slice is up to a length of EntryPerPage, and is
-// sorted in ascending id order.
-func GetServers(id int64) ([]*ServerMetric, bool) {
-	return db.get().GetServers(id)
+// in the result. The returned slice is up to a length of the arg maxResults or
+// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
+	return db.get().GetServers(id, maxResults)
 }
 
 // GetServerSockets returns a slice of server's (identified by id) normal socket's
@@ -112,10 +134,10 @@
 // be queried for.
 //
 // The arg startID specifies that only sockets with id at or above it will be
-// included in the result. The returned slice is up to a length of EntryPerPage,
-// and is sorted in ascending id order.
-func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
-	return db.get().GetServerSockets(id, startID)
+// included in the result. The returned slice is up to a length of the arg maxResults
+// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+	return db.get().GetServerSockets(id, startID, maxResults)
 }
 
 // GetChannel returns the ChannelMetric for the channel (identified by id).
@@ -133,6 +155,11 @@
 	return db.get().GetSocket(id)
 }
 
+// GetServer returns the ServerMetric for the server (identified by id).
+func GetServer(id int64) *ServerMetric {
+	return db.get().GetServer(id)
+}
+
 // RegisterChannel registers the given channel c in channelz database with ref
 // as its reference name, and add it to the child list of its parent (identified
 // by pid). pid = 0 means no parent. It returns the unique channelz tracking id
@@ -146,6 +173,7 @@
 		nestedChans: make(map[int64]string),
 		id:          id,
 		pid:         pid,
+		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
 	if pid == 0 {
 		db.get().addChannel(id, cn, true, pid, ref)
@@ -170,6 +198,7 @@
 		sockets: make(map[int64]string),
 		id:      id,
 		pid:     pid,
+		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
 	db.get().addSubChannel(id, sc, pid, ref)
 	return id
@@ -226,6 +255,24 @@
 	db.get().removeEntry(id)
 }
 
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
+// to the channel trace.
+// The Parent field is optional. It is used for an event that will also be recorded in the
+// entity's parent trace.
+type TraceEventDesc struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEventDesc
+}
+
+// AddTraceEvent adds a trace event related to the entity with the specified id, using the provided TraceEventDesc.
+func AddTraceEvent(id int64, desc *TraceEventDesc) {
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	db.get().traceEvent(id, desc)
+}
+
 // channelMap is the storage data structure for channelz.
 // Methods of channelMap can be divided in two two categories with respect to locking.
 // 1. Methods acquire the global lock.
@@ -251,6 +298,7 @@
 func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
 	c.mu.Lock()
 	cn.cm = c
+	cn.trace.cm = c
 	c.channels[id] = cn
 	if isTopChannel {
 		c.topLevelChannels[id] = struct{}{}
@@ -263,6 +311,7 @@
 func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
 	c.mu.Lock()
 	sc.cm = c
+	sc.trace.cm = c
 	c.subChannels[id] = sc
 	c.findEntry(pid).addChild(id, sc)
 	c.mu.Unlock()
@@ -284,16 +333,25 @@
 	c.mu.Unlock()
 }
 
-// removeEntry triggers the removal of an entry, which may not indeed delete the
-// entry, if it has to wait on the deletion of its children, or may lead to a chain
-// of entry deletion. For example, deleting the last socket of a gracefully shutting
-// down server will lead to the server being also deleted.
+// removeEntry triggers the removal of an entry, which may not immediately delete the entry if it has
+// to wait on the deletion of its children and until no other entity's channel trace references it.
+// It may lead to a chain of entry deletions. For example, deleting the last socket of a gracefully
+// shutting down server will lead to the server also being deleted.
 func (c *channelMap) removeEntry(id int64) {
 	c.mu.Lock()
 	c.findEntry(id).triggerDelete()
 	c.mu.Unlock()
 }
 
+// c.mu must be held by the caller
+func (c *channelMap) decrTraceRefCount(id int64) {
+	e := c.findEntry(id)
+	if v, ok := e.(tracedChannel); ok {
+		v.decrTraceRefCount()
+		e.deleteSelfIfReady()
+	}
+}
+
 // c.mu must be held by the caller.
 func (c *channelMap) findEntry(id int64) entry {
 	var v entry
@@ -347,6 +405,39 @@
 	}
 }
 
+func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
+	c.mu.Lock()
+	child := c.findEntry(id)
+	childTC, ok := child.(tracedChannel)
+	if !ok {
+		c.mu.Unlock()
+		return
+	}
+	childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
+	if desc.Parent != nil {
+		parent := c.findEntry(child.getParentID())
+		var chanType RefChannelType
+		switch child.(type) {
+		case *channel:
+			chanType = RefChannel
+		case *subChannel:
+			chanType = RefSubChannel
+		}
+		if parentTC, ok := parent.(tracedChannel); ok {
+			parentTC.getChannelTrace().append(&TraceEvent{
+				Desc:      desc.Parent.Desc,
+				Severity:  desc.Parent.Severity,
+				Timestamp: time.Now(),
+				RefID:     id,
+				RefName:   childTC.getRefName(),
+				RefType:   chanType,
+			})
+			childTC.incrTraceRefCount()
+		}
+	}
+	c.mu.Unlock()
+}
+
 type int64Slice []int64
 
 func (s int64Slice) Len() int           { return len(s) }
@@ -361,29 +452,32 @@
 	return n
 }
 
-func min(a, b int) int {
+func min(a, b int64) int64 {
 	if a < b {
 		return a
 	}
 	return b
 }
 
-func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
+func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
 	c.mu.RLock()
-	l := len(c.topLevelChannels)
+	l := int64(len(c.topLevelChannels))
 	ids := make([]int64, 0, l)
-	cns := make([]*channel, 0, min(l, EntryPerPage))
+	cns := make([]*channel, 0, min(l, maxResults))
 
 	for k := range c.topLevelChannels {
 		ids = append(ids, k)
 	}
 	sort.Sort(int64Slice(ids))
 	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
-	count := 0
+	count := int64(0)
 	var end bool
 	var t []*ChannelMetric
 	for i, v := range ids[idx:] {
-		if count == EntryPerPage {
+		if count == maxResults {
 			break
 		}
 		if cn, ok := c.channels[v]; ok {
@@ -408,25 +502,29 @@
 		t[i].ChannelData = cn.c.ChannelzMetric()
 		t[i].ID = cn.id
 		t[i].RefName = cn.refName
+		t[i].Trace = cn.trace.dumpData()
 	}
 	return t, end
 }
 
-func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
+func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
 	c.mu.RLock()
-	l := len(c.servers)
+	l := int64(len(c.servers))
 	ids := make([]int64, 0, l)
-	ss := make([]*server, 0, min(l, EntryPerPage))
+	ss := make([]*server, 0, min(l, maxResults))
 	for k := range c.servers {
 		ids = append(ids, k)
 	}
 	sort.Sort(int64Slice(ids))
 	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
-	count := 0
+	count := int64(0)
 	var end bool
 	var s []*ServerMetric
 	for i, v := range ids[idx:] {
-		if count == EntryPerPage {
+		if count == maxResults {
 			break
 		}
 		if svr, ok := c.servers[v]; ok {
@@ -454,7 +552,10 @@
 	return s, end
 }
 
-func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
+func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
 	var svr *server
 	var ok bool
 	c.mu.RLock()
@@ -464,18 +565,18 @@
 		return nil, true
 	}
 	svrskts := svr.sockets
-	l := len(svrskts)
+	l := int64(len(svrskts))
 	ids := make([]int64, 0, l)
-	sks := make([]*normalSocket, 0, min(l, EntryPerPage))
+	sks := make([]*normalSocket, 0, min(l, maxResults))
 	for k := range svrskts {
 		ids = append(ids, k)
 	}
-	sort.Sort((int64Slice(ids)))
-	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
-	count := 0
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+	count := int64(0)
 	var end bool
 	for i, v := range ids[idx:] {
-		if count == EntryPerPage {
+		if count == maxResults {
 			break
 		}
 		if ns, ok := c.normalSockets[v]; ok {
@@ -514,10 +615,14 @@
 	}
 	cm.NestedChans = copyMap(cn.nestedChans)
 	cm.SubChans = copyMap(cn.subChans)
+	// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
+	// holding the lock to prevent potential data race.
+	chanCopy := cn.c
 	c.mu.RUnlock()
-	cm.ChannelData = cn.c.ChannelzMetric()
+	cm.ChannelData = chanCopy.ChannelzMetric()
 	cm.ID = cn.id
 	cm.RefName = cn.refName
+	cm.Trace = cn.trace.dumpData()
 	return cm
 }
 
@@ -532,10 +637,14 @@
 		return nil
 	}
 	cm.Sockets = copyMap(sc.sockets)
+	// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
+	// holding the lock to prevent potential data race.
+	chanCopy := sc.c
 	c.mu.RUnlock()
-	cm.ChannelData = sc.c.ChannelzMetric()
+	cm.ChannelData = chanCopy.ChannelzMetric()
 	cm.ID = sc.id
 	cm.RefName = sc.refName
+	cm.Trace = sc.trace.dumpData()
 	return cm
 }
 
@@ -560,6 +669,23 @@
 	return nil
 }
 
+func (c *channelMap) GetServer(id int64) *ServerMetric {
+	sm := &ServerMetric{}
+	var svr *server
+	var ok bool
+	c.mu.RLock()
+	if svr, ok = c.servers[id]; !ok {
+		c.mu.RUnlock()
+		return nil
+	}
+	sm.ListenSockets = copyMap(svr.listenSockets)
+	c.mu.RUnlock()
+	sm.ID = svr.id
+	sm.RefName = svr.refName
+	sm.ServerData = svr.s.ChannelzMetric()
+	return sm
+}
+
 type idGenerator struct {
 	id int64
 }
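
The pagination change above (maxResults, with 0 falling back to EntryPerPage) is consumed by the channelz service; since the package is internal, the sketch below is illustrative only and shows how a caller inside the grpc module could page through all top-level channels:

```go
package sketch // illustrative only: internal/channelz cannot be imported externally

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

// dumpTopChannels walks every top-level channel, one page at a time.
func dumpTopChannels() {
	var startID int64
	for {
		metrics, end := channelz.GetTopChannels(startID, 0) // 0 => EntryPerPage
		for _, cm := range metrics {
			fmt.Printf("channel %d (%q): %d calls started\n",
				cm.ID, cm.RefName, cm.ChannelData.CallsStarted)
		}
		if end || len(metrics) == 0 {
			return
		}
		startID = metrics[len(metrics)-1].ID + 1
	}
}
```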
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
new file mode 100644
index 0000000..17c2274
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -0,0 +1,702 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id to child list
+	addChild(id int64, e entry)
+	// deleteChild deletes a child with channelz id to be id from child list
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from channelz database. However, if child
+	// list is not empty, then deletion from the database is on hold until the last
+	// child is deleted from database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the child
+	// list is now empty. If both conditions are met, then delete self from database.
+	deleteSelfIfReady()
+	// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
+	getParentID() int64
+}
+
+// dummyEntry is a fake entry to handle entry not found case.
+type dummyEntry struct {
+	idNotFound int64
+}
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under race condition.
+	// For example, there could be a race between ClientConn.Close() info being propagated
+	// to addrConn and http2Client. ClientConn.Close() cancels the context, which causes
+	// http2Client to error out. The error info is then caught by the transport monitor
+	// before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
+	// the addrConn will create a new transport. And when registering the new transport in
+	// channelz, its parent addrConn could have already been torn down and deleted
+	// from channelz tracking, and thus reach the code here.
+	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under race condition.
+	// Refer to the example described in addChild().
+	grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// ChannelMetric defines the info channelz provides for a specific Channel, which
+// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ChannelMetric struct {
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+	// ChannelData contains channel internal metric reported by the channel through
+	// ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this channel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this channel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this channel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	// Note that the current gRPC implementation doesn't allow a channel to have sockets
+	// directly; therefore, this field is unused.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// SubChannelMetric defines the info channelz provides for a specific SubChannel,
+// which includes ChannelInternalMetric and channelz-specific data, such as
+// channelz id, child list, etc.
+type SubChannelMetric struct {
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName string
+	// ChannelData contains subchannel internal metric reported by the subchannel
+	// through ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this subchannel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	// Note current grpc implementation doesn't allow subchannel to have nested channels
+	// as children, therefore, this field is unused.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this subchannel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	// Note current grpc implementation doesn't allow subchannel to have subchannels
+	// as children, therefore, this field is unused.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this subchannel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// ChannelInternalMetric defines the struct that the implementor of Channel interface
+// should return from ChannelzMetric().
+type ChannelInternalMetric struct {
+	// current connectivity state of the channel.
+	State connectivity.State
+	// The target this channel originally tried to connect to.  May be absent.
+	Target string
+	// The number of calls started on the channel.
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+}
+
+// ChannelTrace stores traced events on a channel/subchannel and related info.
+type ChannelTrace struct {
+	// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
+	EventNum int64
+	// CreationTime is the creation time of the trace.
+	CreationTime time.Time
+	// Events stores the most recent trace events (up to $maxTraceEntry; newer events overwrite the
+	// oldest ones).
+	Events []*TraceEvent
+}
+
+// TraceEvent represent a single trace event
+type TraceEvent struct {
+	// Desc is a simple description of the trace event.
+	Desc string
+	// Severity states the severity of this trace event.
+	Severity Severity
+	// Timestamp is the event time.
+	Timestamp time.Time
+	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+	// involved in this event.
+	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+	RefID int64
+	// RefName is the reference name for the entity that gets referenced in the event.
+	RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
+	RefType RefChannelType
+}
+
+// Channel is the interface that should be satisfied in order to be tracked by
+// channelz as Channel or SubChannel.
+type Channel interface {
+	ChannelzMetric() *ChannelInternalMetric
+}
+
+type dummyChannel struct{}
+
+func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
+	return &ChannelInternalMetric{}
+}
+
+type channel struct {
+	refName     string
+	c           Channel
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	id          int64
+	pid         int64
+	cm          *channelMap
+	trace       *channelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+}
+
+func (c *channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *subChannel:
+		c.subChans[id] = v.refName
+	case *channel:
+		c.nestedChans[id] = v.refName
+	default:
+		grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) getParentID() int64 {
+	return c.pid
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the
+// corresponding grpc object has been invoked, and the channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
+func (c *channel) deleteSelfFromTree() (deleted bool) {
+	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+		return false
+	}
+	// not top channel
+	if c.pid != 0 {
+		c.cm.findEntry(c.pid).deleteChild(c.id)
+	}
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
+// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
+// channel, and its memory will be garbage collected.
+//
+// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
+// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from map.
+func (c *channel) deleteSelfFromMap() (delete bool) {
+	if c.getTraceRefCount() != 0 {
+		c.c = &dummyChannel{}
+		return false
+	}
+	return true
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+//    parent's child list.
+// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+//    will return entry not found error.
+func (c *channel) deleteSelfIfReady() {
+	if !c.deleteSelfFromTree() {
+		return
+	}
+	if !c.deleteSelfFromMap() {
+		return
+	}
+	c.cm.deleteEntry(c.id)
+	c.trace.clear()
+}
+
+func (c *channel) getChannelTrace() *channelTrace {
+	return c.trace
+}
+
+func (c *channel) incrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *channel) decrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *channel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&c.traceRefCount)
+	return int(i)
+}
+
+func (c *channel) getRefName() string {
+	return c.refName
+}
+
+type subChannel struct {
+	refName       string
+	c             Channel
+	closeCalled   bool
+	sockets       map[int64]string
+	id            int64
+	pid           int64
+	cm            *channelMap
+	trace         *channelTrace
+	traceRefCount int32
+}
+
+func (sc *subChannel) addChild(id int64, e entry) {
+	if v, ok := e.(*normalSocket); ok {
+		sc.sockets[id] = v.refName
+	} else {
+		grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+	}
+}
+
+func (sc *subChannel) deleteChild(id int64) {
+	delete(sc.sockets, id)
+	sc.deleteSelfIfReady()
+}
+
+func (sc *subChannel) triggerDelete() {
+	sc.closeCalled = true
+	sc.deleteSelfIfReady()
+}
+
+func (sc *subChannel) getParentID() int64 {
+	return sc.pid
+}
+
+// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
+// means deleting the subchannel reference from its parent's child list.
+//
+// In order for a subchannel to be deleted from the tree, it must meet two criteria: removal of
+// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
+//
+// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
+	if !sc.closeCalled || len(sc.sockets) != 0 {
+		return false
+	}
+	sc.cm.findEntry(sc.pid).deleteChild(sc.id)
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
+// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
+// the subchannel, and its memory will be garbage collected.
+//
+// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
+// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
+func (sc *subChannel) deleteSelfFromMap() (delete bool) {
+	if sc.getTraceRefCount() != 0 {
+		// free the grpc struct (i.e. addrConn)
+		sc.c = &dummyChannel{}
+		return false
+	}
+	return true
+}
+
+// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
+//    its parent's child list.
+// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
+//    by id will return entry not found error.
+func (sc *subChannel) deleteSelfIfReady() {
+	if !sc.deleteSelfFromTree() {
+		return
+	}
+	if !sc.deleteSelfFromMap() {
+		return
+	}
+	sc.cm.deleteEntry(sc.id)
+	sc.trace.clear()
+}
+
+func (sc *subChannel) getChannelTrace() *channelTrace {
+	return sc.trace
+}
+
+func (sc *subChannel) incrTraceRefCount() {
+	atomic.AddInt32(&sc.traceRefCount, 1)
+}
+
+func (sc *subChannel) decrTraceRefCount() {
+	atomic.AddInt32(&sc.traceRefCount, -1)
+}
+
+func (sc *subChannel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&sc.traceRefCount)
+	return int(i)
+}
+
+func (sc *subChannel) getRefName() string {
+	return sc.refName
+}
+
+// SocketMetric defines the info channelz provides for a specific Socket, which
+// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
+type SocketMetric struct {
+	// ID is the channelz id of this socket.
+	ID int64
+	// RefName is the human readable reference string of this socket.
+	RefName string
+	// SocketData contains socket internal metric reported by the socket through
+	// ChannelzMetric().
+	SocketData *SocketInternalMetric
+}
+
+// SocketInternalMetric defines the struct that the implementor of Socket interface
+// should return from ChannelzMetric().
+type SocketInternalMetric struct {
+	// The number of streams that have been started.
+	StreamsStarted int64
+	// The number of streams that have ended successfully:
+	// On client side, receiving frame with eos bit set.
+	// On server side, sending frame with eos bit set.
+	StreamsSucceeded int64
+	// The number of streams that have ended unsuccessfully:
+	// On client side, termination without receiving frame with eos bit set.
+	// On server side, termination without sending frame with eos bit set.
+	StreamsFailed int64
+	// The number of messages successfully sent on this socket.
+	MessagesSent     int64
+	MessagesReceived int64
+	// The number of keep alives sent.  This is typically implemented with HTTP/2
+	// ping messages.
+	KeepAlivesSent int64
+	// The last time a stream was created by this endpoint.  Usually unset for
+	// servers.
+	LastLocalStreamCreatedTimestamp time.Time
+	// The last time a stream was created by the remote endpoint.  Usually unset
+	// for clients.
+	LastRemoteStreamCreatedTimestamp time.Time
+	// The last time a message was sent by this endpoint.
+	LastMessageSentTimestamp time.Time
+	// The last time a message was received by this endpoint.
+	LastMessageReceivedTimestamp time.Time
+	// The amount of window, granted to the local endpoint by the remote endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	LocalFlowControlWindow int64
+	// The amount of window, granted to the remote endpoint by the local endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	RemoteFlowControlWindow int64
+	// The locally bound address.
+	LocalAddr net.Addr
+	// The remote bound address.  May be absent.
+	RemoteAddr net.Addr
+	// Optional, represents the name of the remote endpoint, if different than
+	// the original target name.
+	RemoteName    string
+	SocketOptions *SocketOptionData
+	Security      credentials.ChannelzSecurityValue
+}
+
+// Socket is the interface that should be satisfied in order to be tracked by
+// channelz as Socket.
+type Socket interface {
+	ChannelzMetric() *SocketInternalMetric
+}
+
+type listenSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ls *listenSocket) addChild(id int64, e entry) {
+	grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *listenSocket) deleteChild(id int64) {
+	grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *listenSocket) triggerDelete() {
+	ls.cm.deleteEntry(ls.id)
+	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
+}
+
+func (ls *listenSocket) deleteSelfIfReady() {
+	grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *listenSocket) getParentID() int64 {
+	return ls.pid
+}
+
+type normalSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ns *normalSocket) addChild(id int64, e entry) {
+	grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+}
+
+func (ns *normalSocket) deleteChild(id int64) {
+	grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+}
+
+func (ns *normalSocket) triggerDelete() {
+	ns.cm.deleteEntry(ns.id)
+	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
+}
+
+func (ns *normalSocket) deleteSelfIfReady() {
+	grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
+}
+
+func (ns *normalSocket) getParentID() int64 {
+	return ns.pid
+}
+
+// ServerMetric defines the info channelz provides for a specific Server, which
+// includes ServerInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ServerMetric struct {
+	// ID is the channelz id of this server.
+	ID int64
+	// RefName is the human readable reference string of this server.
+	RefName string
+	// ServerData contains server internal metric reported by the server through
+	// ChannelzMetric().
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of Server interface
+// should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
+type Server interface {
+	ChannelzMetric() *ServerInternalMetric
+}
+
+type server struct {
+	refName       string
+	s             Server
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	id            int64
+	cm            *channelMap
+}
+
+func (s *server) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *normalSocket:
+		s.sockets[id] = v.refName
+	case *listenSocket:
+		s.listenSockets[id] = v.refName
+	default:
+		grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+	}
+}
+
+func (s *server) deleteChild(id int64) {
+	delete(s.sockets, id)
+	delete(s.listenSockets, id)
+	s.deleteSelfIfReady()
+}
+
+func (s *server) triggerDelete() {
+	s.closeCalled = true
+	s.deleteSelfIfReady()
+}
+
+func (s *server) deleteSelfIfReady() {
+	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+		return
+	}
+	s.cm.deleteEntry(s.id)
+}
+
+func (s *server) getParentID() int64 {
+	return 0
+}
+
+type tracedChannel interface {
+	getChannelTrace() *channelTrace
+	incrTraceRefCount()
+	decrTraceRefCount()
+	getRefName() string
+}
+
+type channelTrace struct {
+	cm          *channelMap
+	createdTime time.Time
+	eventCount  int64
+	mu          sync.Mutex
+	events      []*TraceEvent
+}
+
+func (c *channelTrace) append(e *TraceEvent) {
+	c.mu.Lock()
+	if len(c.events) == getMaxTraceEntry() {
+		del := c.events[0]
+		c.events = c.events[1:]
+		if del.RefID != 0 {
+			// start recursive cleanup in a goroutine to not block the call originating from grpc.
+			go func() {
+				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
+				c.cm.mu.Lock()
+				c.cm.decrTraceRefCount(del.RefID)
+				c.cm.mu.Unlock()
+			}()
+		}
+	}
+	e.Timestamp = time.Now()
+	c.events = append(c.events, e)
+	c.eventCount++
+	c.mu.Unlock()
+}
+
+func (c *channelTrace) clear() {
+	c.mu.Lock()
+	for _, e := range c.events {
+		if e.RefID != 0 {
+			// caller should have already held the c.cm.mu lock.
+			c.cm.decrTraceRefCount(e.RefID)
+		}
+	}
+	c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+	// CtUNKNOWN indicates unknown severity of a trace event.
+	CtUNKNOWN Severity = iota
+	// CtINFO indicates info level severity of a trace event.
+	CtINFO
+	// CtWarning indicates warning level severity of a trace event.
+	CtWarning
+	// CtError indicates error level severity of a trace event.
+	CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
+type RefChannelType int
+
+const (
+	// RefChannel indicates the referenced entity is a Channel.
+	RefChannel RefChannelType = iota
+	// RefSubChannel indicates the referenced entity is a SubChannel.
+	RefSubChannel
+)
+
+func (c *channelTrace) dumpData() *ChannelTrace {
+	c.mu.Lock()
+	ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
+	ct.Events = c.events[:len(c.events)]
+	c.mu.Unlock()
+	return ct
+}
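
A minimal, hypothetical sketch (not part of the vendored change) of how code inside the grpc module could satisfy the exported Channel interface defined above; the demoChannel type and its fields are made up for illustration, and the internal channelz package cannot be imported from outside the module.

package demo

import (
	"sync/atomic"
	"time"

	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/channelz"
)

// demoChannel is a hypothetical type that tracks call counters atomically.
type demoChannel struct {
	target          string
	callsStarted    int64
	callsSucceeded  int64
	callsFailed     int64
	lastCallStarted int64 // unix nanoseconds, updated atomically
}

// ChannelzMetric reports a snapshot of the counters; channelz copies it into
// ChannelMetric.ChannelData when the entity is queried.
func (d *demoChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
	return &channelz.ChannelInternalMetric{
		State:                    connectivity.Ready,
		Target:                   d.target,
		CallsStarted:             atomic.LoadInt64(&d.callsStarted),
		CallsSucceeded:           atomic.LoadInt64(&d.callsSucceeded),
		CallsFailed:              atomic.LoadInt64(&d.callsFailed),
		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&d.lastCallStarted)),
	}
}

// compile-time check that demoChannel satisfies the Channel interface.
var _ channelz.Channel = (*demoChannel)(nil)
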
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
new file mode 100644
index 0000000..692dd61
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
@@ -0,0 +1,53 @@
+// +build !appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+type SocketOptionData struct {
+	Linger      *unix.Linger
+	RecvTimeout *unix.Timeval
+	SendTimeout *unix.Timeval
+	TCPInfo     *unix.TCPInfo
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
+		s.Linger = v
+	}
+	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
+		s.RecvTimeout = v
+	}
+	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
+		s.SendTimeout = v
+	}
+	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
+		s.TCPInfo = v
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
new file mode 100644
index 0000000..79edbef
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
@@ -0,0 +1,44 @@
+// +build !linux appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"sync"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+// Socket options are not supported on non-linux environments or appengine.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments or appengine.
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	once.Do(func() {
+		grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
+	})
+}
diff --git a/vendor/google.golang.org/grpc/envconfig.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
similarity index 61%
rename from vendor/google.golang.org/grpc/envconfig.go
rename to vendor/google.golang.org/grpc/internal/channelz/util_linux.go
index d50178e..fdf409d 100644
--- a/vendor/google.golang.org/grpc/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -1,3 +1,5 @@
+// +build linux,!appengine
+
 /*
  *
  * Copyright 2018 gRPC authors.
@@ -16,22 +18,22 @@
  *
  */
 
-package grpc
+package channelz
 
 import (
-	"os"
-	"strings"
+	"syscall"
 )
 
-const (
-	envConfigPrefix        = "GRPC_GO_"
-	envConfigStickinessStr = envConfigPrefix + "STICKINESS"
-)
-
-var (
-	envConfigStickinessOn bool
-)
-
-func init() {
-	envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on")
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket interface{}) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
 }
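
A brief usage sketch for GetSocketOption, assuming a linux build inside the grpc module (the dumpSockOpts helper is hypothetical): *net.TCPConn implements syscall.Conn, so the raw fd is handed to Getsockopt via RawConn.Control.

package demo

import (
	"fmt"
	"net"

	"google.golang.org/grpc/internal/channelz"
)

// dumpSockOpts prints a couple of TCP_INFO fields for an established connection.
func dumpSockOpts(conn net.Conn) {
	data := channelz.GetSocketOption(conn) // nil on errors or unsupported conn types
	if data == nil || data.TCPInfo == nil {
		return
	}
	fmt.Printf("rtt=%dus snd_cwnd=%d\n", data.TCPInfo.Rtt, data.TCPInfo.Snd_cwnd)
}
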
diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
similarity index 73%
copy from vendor/google.golang.org/grpc/naming/go18.go
copy to vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
index b5a0f84..8864a08 100644
--- a/vendor/google.golang.org/grpc/naming/go18.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -1,8 +1,8 @@
-// +build go1.8
+// +build !linux appengine
 
 /*
  *
- * Copyright 2017 gRPC authors.
+ * Copyright 2018 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,11 +18,9 @@
  *
  */
 
-package naming
+package channelz
 
-import "net"
-
-var (
-	lookupHost = net.DefaultResolver.LookupHost
-	lookupSRV  = net.DefaultResolver.LookupSRV
-)
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(c interface{}) *SocketOptionData {
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000..11be7cd
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package envconfig contains grpc settings configured by environment variables.
+package envconfig
+
+import (
+	"os"
+	"strings"
+)
+
+const (
+	prefix              = "GRPC_GO_"
+	retryStr            = prefix + "RETRY"
+	requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
+)
+
+// RequireHandshakeSetting describes the settings for handshaking.
+type RequireHandshakeSetting int
+
+const (
+	// RequireHandshakeOn indicates to wait for handshake before considering a
+	// connection ready/successful.
+	RequireHandshakeOn RequireHandshakeSetting = iota
+	// RequireHandshakeOff indicates to not wait for handshake before
+	// considering a connection ready/successful.
+	RequireHandshakeOff
+)
+
+var (
+	// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
+	Retry = strings.EqualFold(os.Getenv(retryStr), "on")
+	// RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
+	// environment variable.
+	//
+	// Will be removed after the 1.18 release.
+	RequireHandshake = RequireHandshakeOn
+)
+
+func init() {
+	switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
+	case "on":
+		fallthrough
+	default:
+		RequireHandshake = RequireHandshakeOn
+	case "off":
+		RequireHandshake = RequireHandshakeOff
+	}
+}
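
A minimal sketch of how these settings are consumed. The values are fixed at package init from the process environment (e.g. GRPC_GO_RETRY=on GRPC_GO_REQUIRE_HANDSHAKE=off ./client); the main function is hypothetical and, since the package is internal, only code inside the grpc module can import it.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/envconfig"
)

func main() {
	fmt.Println("retry enabled:", envconfig.Retry)
	fmt.Println("wait for handshake:", envconfig.RequireHandshake == envconfig.RequireHandshakeOn)
}
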
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
new file mode 100644
index 0000000..200b115
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcrand implements math/rand functions in a concurrent-safe way
+// with a global random source, independent of math/rand's global source.
+package grpcrand
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+var (
+	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
+	mu sync.Mutex
+)
+
+// Int63n implements rand.Int63n on the grpcrand global source.
+func Int63n(n int64) int64 {
+	mu.Lock()
+	res := r.Int63n(n)
+	mu.Unlock()
+	return res
+}
+
+// Intn implements rand.Intn on the grpcrand global source.
+func Intn(n int) int {
+	mu.Lock()
+	res := r.Intn(n)
+	mu.Unlock()
+	return res
+}
+
+// Float64 implements rand.Float64 on the grpcrand global source.
+func Float64() float64 {
+	mu.Lock()
+	res := r.Float64()
+	mu.Unlock()
+	return res
+}
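
A short sketch of the kind of use these helpers are intended for, such as adding jitter to a backoff delay without touching math/rand's global source; the jitter helper is hypothetical.

package demo

import (
	"time"

	"google.golang.org/grpc/internal/grpcrand"
)

// jitter returns a duration in [d, 2d) for d > 0, drawn from the
// concurrency-safe global source instead of math/rand's global one.
func jitter(d time.Duration) time.Duration {
	return d + time.Duration(grpcrand.Int63n(int64(d)))
}
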
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
new file mode 100644
index 0000000..fbe697c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcsync implements additional synchronization primitives built upon
+// the sync package.
+package grpcsync
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// Event represents a one-time event that may occur in the future.
+type Event struct {
+	fired int32
+	c     chan struct{}
+	o     sync.Once
+}
+
+// Fire causes e to complete.  It is safe to call multiple times, and
+// concurrently.  It returns true iff this call to Fire caused the signaling
+// channel returned by Done to close.
+func (e *Event) Fire() bool {
+	ret := false
+	e.o.Do(func() {
+		atomic.StoreInt32(&e.fired, 1)
+		close(e.c)
+		ret = true
+	})
+	return ret
+}
+
+// Done returns a channel that will be closed when Fire is called.
+func (e *Event) Done() <-chan struct{} {
+	return e.c
+}
+
+// HasFired returns true if Fire has been called.
+func (e *Event) HasFired() bool {
+	return atomic.LoadInt32(&e.fired) == 1
+}
+
+// NewEvent returns a new, ready-to-use Event.
+func NewEvent() *Event {
+	return &Event{c: make(chan struct{})}
+}
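
A minimal usage sketch for Event (hypothetical main, importable only from within the grpc module): Fire closes the Done channel exactly once, and only the winning caller sees true.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	ev := grpcsync.NewEvent()

	go func() {
		if ev.Fire() { // true only for the call that actually closed the channel
			fmt.Println("fired")
		}
		ev.Fire() // subsequent calls are safe no-ops and return false
	}()

	<-ev.Done() // unblocks once Fire has been called
	fmt.Println("has fired:", ev.HasFired())
}
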
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 53f1775..c1d2c69 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -15,13 +15,40 @@
  *
  */
 
-// Package internal contains gRPC-internal code for testing, to avoid polluting
-// the godoc of the top-level grpc package.
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package.  It must not import any grpc
+// symbols to avoid circular dependencies.
 package internal
 
-// TestingUseHandlerImpl enables the http.Handler-based server implementation.
-// It must be called before Serve and requires TLS credentials.
-//
-// The provided grpcServer must be of type *grpc.Server. It is untyped
-// for circular dependency reasons.
-var TestingUseHandlerImpl func(grpcServer interface{})
+import (
+	"context"
+	"time"
+)
+
+var (
+	// WithResolverBuilder is exported by dialoptions.go
+	WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
+	// WithHealthCheckFunc is not exported by dialoptions.go
+	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+)
+
+// HealthChecker defines the signature of the client-side LB channel health checking function.
+type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+	// that supports backend returned by grpclb balancer.
+	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 0000000..43281a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,114 @@
+// +build !appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionalities that grpc uses to get low-level operating system
+// stats/info.
+package syscall
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc/grpclog"
+)
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+func GetCPUTime() int64 {
+	var ts unix.Timespec
+	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
+		grpclog.Fatal(err)
+	}
+	return ts.Nano()
+}
+
+// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
+type Rusage syscall.Rusage
+
+// GetRusage returns the resource usage of current process.
+func GetRusage() (rusage *Rusage) {
+	rusage = new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
+	return
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	f := (*syscall.Rusage)(first)
+	l := (*syscall.Rusage)(latest)
+	var (
+		utimeDiffs  = l.Utime.Sec - f.Utime.Sec
+		utimeDiffus = l.Utime.Usec - f.Utime.Usec
+		stimeDiffs  = l.Stime.Sec - f.Stime.Sec
+		stimeDiffus = l.Stime.Usec - f.Stime.Usec
+	)
+
+	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
+	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
+
+	return uTimeElapsed, sTimeElapsed
+}
+
+// SetTCPUserTimeout sets the TCP user timeout on a connection's socket
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		// not a TCP connection. exit early
+		return nil
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		return fmt.Errorf("error getting raw connection: %v", err)
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
+	})
+	if err != nil {
+		return fmt.Errorf("error setting option on socket: %v", err)
+	}
+
+	return nil
+}
+
+// GetTCPUserTimeout gets the TCP user timeout on a connection's socket
+func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
+		return
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		err = fmt.Errorf("error getting raw connection: %v", err)
+		return
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
+	})
+	if err != nil {
+		err = fmt.Errorf("error getting option on socket: %v", err)
+		return
+	}
+
+	return
+}
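
A usage sketch for the TCP user-timeout helpers (hypothetical endpoint and main): on linux the option is set on the raw socket, while the fallback file that follows makes both helpers no-ops on other platforms.

package main

import (
	"log"
	"net"
	"time"

	grpcsyscall "google.golang.org/grpc/internal/syscall"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:443") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Sets TCP_USER_TIMEOUT on the underlying socket via RawConn.Control.
	if err := grpcsyscall.SetTCPUserTimeout(conn, 20*time.Second); err != nil {
		log.Fatal(err)
	}
	if v, err := grpcsyscall.GetTCPUserTimeout(conn); err == nil {
		log.Printf("TCP_USER_TIMEOUT = %dms", v)
	}
}
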
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 0000000..d3fd9da
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,73 @@
+// +build !linux appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package syscall
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+
+func log() {
+	once.Do(func() {
+		grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
+	})
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+// It always returns 0 under non-linux or appengine environments.
+func GetCPUTime() int64 {
+	log()
+	return 0
+}
+
+// Rusage is an empty struct under non-linux or appengine environment.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux or appengine environment.
+func GetRusage() (rusage *Rusage) {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs. It is a no-op function for non-linux or appengine environments.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux or appengine environments;
+// a negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(conn net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
similarity index 94%
rename from vendor/google.golang.org/grpc/transport/bdp_estimator.go
rename to vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
index 63cd262..070680e 100644
--- a/vendor/google.golang.org/grpc/transport/bdp_estimator.go
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -24,9 +24,10 @@
 )
 
 const (
-	// bdpLimit is the maximum value the flow control windows
-	// will be increased to.
-	bdpLimit = (1 << 20) * 4
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to.  TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
 	// alpha is a constant factor used to keep a moving average
 	// of RTTs.
 	alpha = 0.9
diff --git a/vendor/google.golang.org/grpc/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
similarity index 72%
rename from vendor/google.golang.org/grpc/transport/controlbuf.go
rename to vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index e147cd5..204ba15 100644
--- a/vendor/google.golang.org/grpc/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -28,6 +28,10 @@
 	"golang.org/x/net/http2/hpack"
 )
 
+var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
+	e.SetMaxDynamicTableSizeLimit(v)
+}
+
 type itemNode struct {
 	it   interface{}
 	next *itemNode
@@ -80,6 +84,13 @@
 // the control buffer of transport. They represent different aspects of
 // control tasks, e.g., flow control, settings, streaming resetting, etc.
 
+// registerStream is used to register an incoming stream with loopy writer.
+type registerStream struct {
+	streamID uint32
+	wq       *writeQuota
+}
+
+// headerFrame is also used to register stream on the client-side.
 type headerFrame struct {
 	streamID   uint32
 	hf         []hpack.HeaderField
@@ -93,7 +104,6 @@
 
 type cleanupStream struct {
 	streamID uint32
-	idPtr    *uint32
 	rst      bool
 	rstCode  http2.ErrCode
 	onWrite  func()
@@ -127,9 +137,6 @@
 	ss []http2.Setting
 }
 
-type settingsAck struct {
-}
-
 type incomingGoAway struct {
 }
 
@@ -218,6 +225,12 @@
 	return b
 }
 
+// controlBuffer is a way to pass information to loopy.
+// Information is passed as specific struct types called control frames.
+// A control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state.
+// It shouldn't be confused with an HTTP2 frame, although some of the control frames
+// like dataFrame and headerFrame do go out on the wire as HTTP2 frames.
 type controlBuffer struct {
 	ch              chan struct{}
 	done            <-chan struct{}
@@ -268,6 +281,21 @@
 	return true, nil
 }
 
+// Note argument f should never be nil.
+func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return false, c.err
+	}
+	if !f(it) { // f wasn't successful
+		c.mu.Unlock()
+		return false, nil
+	}
+	c.mu.Unlock()
+	return true, nil
+}
+
 func (c *controlBuffer) get(block bool) (interface{}, error) {
 	for {
 		c.mu.Lock()
@@ -324,13 +352,29 @@
 	serverSide
 )
 
+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames. Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames,
+// they are added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
 type loopyWriter struct {
-	side          side
-	cbuf          *controlBuffer
-	sendQuota     uint32
-	oiws          uint32                // outbound initial window size.
-	estdStreams   map[uint32]*outStream // Established streams.
-	activeStreams *outStreamList        // Streams that are sending data.
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is map of all established streams that are not cleaned-up yet.
+	// On client-side, this is all streams whose headers were sent out.
+	// On server-side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally have a list of data items(and perhaps trailers
+	// on the server-side) to be sent out.
+	activeStreams *outStreamList
 	framer        *framer
 	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
 	hEnc          *hpack.Encoder // HPACK encoder.
@@ -361,44 +405,62 @@
 const minBatchSize = 1000
 
 // run should be run in a separate goroutine.
-func (l *loopyWriter) run() {
-	var (
-		it      interface{}
-		err     error
-		isEmpty bool
-	)
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, or/and
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of the run loop, other than processing the incoming control
+// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
+// This results in writing of HTTP2 frames into an underlying write buffer.
+// When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
+// As an optimization, to increase the batch size for each flush, loopy yields the processor once,
+// if the batch size is too low, to give stream goroutines a chance to fill it up.
+func (l *loopyWriter) run() (err error) {
 	defer func() {
-		errorf("transport: loopyWriter.run returning. Err: %v", err)
+		if err == ErrConnClosing {
+			// Don't log ErrConnClosing as error since it happens
+			// 1. When the connection is closed by some other known issue.
+			// 2. User closed the connection.
+			// 3. A graceful close of connection.
+			infof("transport: loopyWriter.run returning. %v", err)
+			err = nil
+		}
 	}()
 	for {
-		it, err = l.cbuf.get(true)
+		it, err := l.cbuf.get(true)
 		if err != nil {
-			return
+			return err
 		}
 		if err = l.handle(it); err != nil {
-			return
+			return err
 		}
 		if _, err = l.processData(); err != nil {
-			return
+			return err
 		}
 		gosched := true
 	hasdata:
 		for {
-			it, err = l.cbuf.get(false)
+			it, err := l.cbuf.get(false)
 			if err != nil {
-				return
+				return err
 			}
 			if it != nil {
 				if err = l.handle(it); err != nil {
-					return
+					return err
 				}
 				if _, err = l.processData(); err != nil {
-					return
+					return err
 				}
 				continue hasdata
 			}
-			if isEmpty, err = l.processData(); err != nil {
-				return
+			isEmpty, err := l.processData()
+			if err != nil {
+				return err
 			}
 			if !isEmpty {
 				continue hasdata
@@ -450,30 +512,39 @@
 	return l.framer.fr.WriteSettingsAck()
 }
 
+func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	l.estdStreams[h.streamID] = str
+	return nil
+}
+
 func (l *loopyWriter) headerHandler(h *headerFrame) error {
 	if l.side == serverSide {
-		if h.endStream { // Case 1.A: Server wants to close stream.
-			// Make sure it's not a trailers only response.
-			if str, ok := l.estdStreams[h.streamID]; ok {
-				if str.state != empty { // either active or waiting on stream quota.
-					// add it str's list of items.
-					str.itl.enqueue(h)
-					return nil
-				}
-			}
-			if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
-				return err
-			}
-			return l.cleanupStreamHandler(h.cleanup)
+		str, ok := l.estdStreams[h.streamID]
+		if !ok {
+			warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+			return nil
 		}
-		// Case 1.B: Server is responding back with headers.
-		str := &outStream{
-			state: empty,
-			itl:   &itemList{},
-			wq:    h.wq,
+		// Case 1.A: Server is responding back with headers.
+		if !h.endStream {
+			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
 		}
-		l.estdStreams[h.streamID] = str
-		return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
+		// else:  Case 1.B: Server wants to close stream.
+
+		if str.state != empty { // either active or waiting on stream quota.
+			// add it str's list of items.
+			str.itl.enqueue(h)
+			return nil
+		}
+		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
+			return err
+		}
+		return l.cleanupStreamHandler(h.cleanup)
 	}
 	// Case 2: Client wants to originate stream.
 	str := &outStream{
@@ -632,6 +703,8 @@
 		return l.outgoingSettingsHandler(i)
 	case *headerFrame:
 		return l.headerHandler(i)
+	case *registerStream:
+		return l.registerStreamHandler(i)
 	case *cleanupStream:
 		return l.cleanupStreamHandler(i)
 	case *incomingGoAway:
@@ -664,26 +737,37 @@
 					}
 				}
 			}
+		case http2.SettingHeaderTableSize:
+			updateHeaderTblSize(l.hEnc, s.Val)
 		}
 	}
 	return nil
 }
 
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data, and then puts it at the end of activeStreams if there is still more data
+// to be sent and the stream has some stream-level flow control quota left.
 func (l *loopyWriter) processData() (bool, error) {
 	if l.sendQuota == 0 {
 		return true, nil
 	}
-	str := l.activeStreams.dequeue()
+	str := l.activeStreams.dequeue() // Remove the first stream.
 	if str == nil {
 		return true, nil
 	}
-	dataItem := str.itl.peek().(*dataFrame)
-	if len(dataItem.h) == 0 && len(dataItem.d) == 0 {
+	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
+	// A data item is represented by a dataFrame, since it later translates into
+	// multiple HTTP2 data frames.
+	// Every dataFrame has two buffers: h, which holds the grpc-message header, and d, which holds the actual data.
+	// As an optimization to keep wire traffic low, data from d is copied to h to make the frame as big as the
+	// maximum possible HTTP2 frame size.
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
 		// Client sends out empty data frame with endStream = true
 		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
 			return false, err
 		}
-		str.itl.dequeue()
+		str.itl.dequeue() // remove the empty data item from stream
 		if str.itl.isEmpty() {
 			str.state = empty
 		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -712,21 +796,20 @@
 	if len(buf) < size {
 		size = len(buf)
 	}
-	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 {
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
 		str.state = waitingOnStreamQuota
 		return false, nil
 	} else if strQuota < size {
 		size = strQuota
 	}
 
-	if l.sendQuota < uint32(size) {
+	if l.sendQuota < uint32(size) { // connection-level flow control.
 		size = int(l.sendQuota)
 	}
 	// Now that outgoing flow controls are checked we can replenish str's write quota
 	str.wq.replenish(size)
 	var endStream bool
-	// This last data message on this stream and all
-	// of it can be written in this go.
+	// If this is the last data message on this stream and all of it can be written in this iteration.
 	if dataItem.endStream && size == len(buf) {
 		// buf contains either data or it contains header but data is empty.
 		if idx == 1 || len(dataItem.d) == 0 {
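
A simplified, hypothetical model of the round-robin scheduling described in the loopyWriter comments above (not the vendored implementation): one active stream is dequeued per iteration, the write is capped by the frame size and the stream- and connection-level quotas, and the stream is re-queued only while it still has data and quota left.

package main

import "fmt"

type stream struct {
	id      int
	pending int // bytes left to send
	quota   int // stream-level flow-control quota
}

func main() {
	const maxFrame = 16384 // cap per write, like http2MaxFrameLen
	sendQuota := 65535     // connection-level quota

	active := []*stream{{id: 1, pending: 40000, quota: 30000}, {id: 3, pending: 5000, quota: 65535}}
	for len(active) > 0 && sendQuota > 0 {
		s := active[0]
		active = active[1:] // dequeue the first stream

		n := s.pending
		if n > maxFrame {
			n = maxFrame
		}
		if n > s.quota {
			n = s.quota
		}
		if n > sendQuota {
			n = sendQuota
		}
		s.pending -= n
		s.quota -= n
		sendQuota -= n
		fmt.Printf("stream %d: wrote %d bytes\n", s.id, n)

		if s.pending > 0 && s.quota > 0 {
			active = append(active, s) // back of the queue
		}
	}
}
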
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
new file mode 100644
index 0000000..9fa306b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"math"
+	"time"
+)
+
+const (
+	// The default value of flow control window size in HTTP2 spec.
+	defaultWindowSize = 65535
+	// The initial window size for flow control.
+	initialWindowSize             = defaultWindowSize // for an RPC
+	infinity                      = time.Duration(math.MaxInt64)
+	defaultClientKeepaliveTime    = infinity
+	defaultClientKeepaliveTimeout = 20 * time.Second
+	defaultMaxStreamsClient       = 100
+	defaultMaxConnectionIdle      = infinity
+	defaultMaxConnectionAge       = infinity
+	defaultMaxConnectionAgeGrace  = infinity
+	defaultServerKeepaliveTime    = 2 * time.Hour
+	defaultServerKeepaliveTimeout = 20 * time.Second
+	defaultKeepalivePolicyMinTime = 5 * time.Minute
+	// max window limit set by HTTP2 Specs.
+	maxWindowSize = math.MaxInt32
+	// defaultWriteQuota is the default value for number of data
+	// bytes that each stream can schedule before some of it being
+	// flushed out.
+	defaultWriteQuota              = 64 * 1024
+	defaultClientMaxHeaderListSize = uint32(16 << 20)
+	defaultServerMaxHeaderListSize = uint32(16 << 20)
+)
diff --git a/vendor/google.golang.org/grpc/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
similarity index 84%
rename from vendor/google.golang.org/grpc/transport/flowcontrol.go
rename to vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index 378f5c4..5ea997a 100644
--- a/vendor/google.golang.org/grpc/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -23,30 +23,6 @@
 	"math"
 	"sync"
 	"sync/atomic"
-	"time"
-)
-
-const (
-	// The default value of flow control window size in HTTP2 spec.
-	defaultWindowSize = 65535
-	// The initial window size for flow control.
-	initialWindowSize             = defaultWindowSize // for an RPC
-	infinity                      = time.Duration(math.MaxInt64)
-	defaultClientKeepaliveTime    = infinity
-	defaultClientKeepaliveTimeout = 20 * time.Second
-	defaultMaxStreamsClient       = 100
-	defaultMaxConnectionIdle      = infinity
-	defaultMaxConnectionAge       = infinity
-	defaultMaxConnectionAgeGrace  = infinity
-	defaultServerKeepaliveTime    = 2 * time.Hour
-	defaultServerKeepaliveTimeout = 20 * time.Second
-	defaultKeepalivePolicyMinTime = 5 * time.Minute
-	// max window limit set by HTTP2 Specs.
-	maxWindowSize = math.MaxInt32
-	// defaultWriteQuota is the default value for number of data
-	// bytes that each stream can schedule before some of it being
-	// flushed out.
-	defaultWriteQuota = 64 * 1024
 )
 
 // writeQuota is a soft limit on the amount of data a stream can
@@ -58,14 +34,20 @@
 	ch chan struct{}
 	// done is triggered in error case.
 	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back to the stream.
+	// It is implemented as a field so that it can be updated
+	// by tests.
+	replenish func(n int)
 }
 
 func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
-	return &writeQuota{
+	w := &writeQuota{
 		quota: sz,
 		ch:    make(chan struct{}, 1),
 		done:  done,
 	}
+	w.replenish = w.realReplenish
+	return w
 }
 
 func (w *writeQuota) get(sz int32) error {
@@ -83,7 +65,7 @@
 	}
 }
 
-func (w *writeQuota) replenish(n int) {
+func (w *writeQuota) realReplenish(n int) {
 	sz := int32(n)
 	a := atomic.AddInt32(&w.quota, sz)
 	b := a - sz
diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
similarity index 90%
rename from vendor/google.golang.org/grpc/transport/handler_server.go
rename to vendor/google.golang.org/grpc/internal/transport/handler_server.go
index f71b748..f2de84d 100644
--- a/vendor/google.golang.org/grpc/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,6 +24,7 @@
 package transport
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -34,7 +35,6 @@
 	"time"
 
 	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
 	"golang.org/x/net/http2"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
@@ -63,9 +63,6 @@
 	if _, ok := w.(http.Flusher); !ok {
 		return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
 	}
-	if _, ok := w.(http.CloseNotifier); !ok {
-		return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
-	}
 
 	st := &serverHandlerTransport{
 		rw:             w,
@@ -80,7 +77,7 @@
 	if v := r.Header.Get("grpc-timeout"); v != "" {
 		to, err := decodeTimeout(v)
 		if err != nil {
-			return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err)
+			return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
 		}
 		st.timeoutSet = true
 		st.timeout = to
@@ -98,7 +95,7 @@
 		for _, v := range vv {
 			v, err := decodeMetadataHeader(k, v)
 			if err != nil {
-				return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err)
+				return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
 			}
 			metakv = append(metakv, k, v)
 		}
@@ -176,17 +173,11 @@
 
 // do runs fn in the ServeHTTP goroutine.
 func (ht *serverHandlerTransport) do(fn func()) error {
-	// Avoid a panic writing to closed channel. Imperfect but maybe good enough.
 	select {
 	case <-ht.closedCh:
 		return ErrConnClosing
-	default:
-		select {
-		case ht.writes <- fn:
-			return nil
-		case <-ht.closedCh:
-			return ErrConnClosing
-		}
+	case ht.writes <- fn:
+		return nil
 	}
 }
 
@@ -237,9 +228,8 @@
 		if ht.stats != nil {
 			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
 		}
-		ht.Close()
-		close(ht.writes)
 	}
+	ht.Close()
 	return err
 }
 
@@ -274,9 +264,7 @@
 		ht.writeCommonHeaders(s)
 		ht.rw.Write(hdr)
 		ht.rw.Write(data)
-		if !opts.Delay {
-			ht.rw.(http.Flusher).Flush()
-		}
+		ht.rw.(http.Flusher).Flush()
 	})
 }
 
@@ -309,7 +297,7 @@
 func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
 	// With this transport type there will be exactly 1 stream: this HTTP request.
 
-	ctx := contextFromRequest(ht.req)
+	ctx := ht.req.Context()
 	var cancel context.CancelFunc
 	if ht.timeoutSet {
 		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -317,22 +305,16 @@
 		ctx, cancel = context.WithCancel(ctx)
 	}
 
-	// requestOver is closed when either the request's context is done
-	// or the status has been written via WriteStatus.
+	// requestOver is closed when the status has been written via WriteStatus.
 	requestOver := make(chan struct{})
-
-	// clientGone receives a single value if peer is gone, either
-	// because the underlying connection is dead or because the
-	// peer sends an http2 RST_STREAM.
-	clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
 	go func() {
 		select {
 		case <-requestOver:
-			return
 		case <-ht.closedCh:
-		case <-clientGone:
+		case <-ht.req.Context().Done():
 		}
 		cancel()
+		ht.Close()
 	}()
 
 	req := ht.req
@@ -409,10 +391,7 @@
 func (ht *serverHandlerTransport) runStream() {
 	for {
 		select {
-		case fn, ok := <-ht.writes:
-			if !ok {
-				return
-			}
+		case fn := <-ht.writes:
 			fn()
 		case <-ht.closedCh:
 			return
@@ -434,18 +413,18 @@
 //   * io.EOF
 //   * io.ErrUnexpectedEOF
 //   * of type transport.ConnectionError
-//   * of type transport.StreamError
+//   * an error from the status package
 func mapRecvMsgError(err error) error {
 	if err == io.EOF || err == io.ErrUnexpectedEOF {
 		return err
 	}
 	if se, ok := err.(http2.StreamError); ok {
 		if code, ok := http2ErrConvTab[se.Code]; ok {
-			return StreamError{
-				Code: code,
-				Desc: se.Error(),
-			}
+			return status.Error(code, se.Error())
 		}
 	}
+	if strings.Contains(err.Error(), "body closed by handler") {
+		return status.Error(codes.Canceled, err.Error())
+	}
 	return connectionErrorf(true, err, err.Error())
 }
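
The rewritten do() above drops the outer select and its default branch: a single flat select either hands the closure to the writer goroutine or reports that the transport is closed. A minimal sketch under assumed names (handlerTransport here is a stand-in, not the real serverHandlerTransport):

```go
package main

import (
	"errors"
	"fmt"
)

var errConnClosing = errors.New("transport is closing")

type handlerTransport struct {
	writes   chan func()   // drained by the ServeHTTP goroutine
	closedCh chan struct{} // closed when the transport shuts down
}

// do schedules fn on the writer goroutine, or fails once the transport is closed.
func (ht *handlerTransport) do(fn func()) error {
	select {
	case <-ht.closedCh:
		return errConnClosing
	case ht.writes <- fn:
		return nil
	}
}

func main() {
	ht := &handlerTransport{writes: make(chan func(), 1), closedCh: make(chan struct{})}
	fmt.Println(ht.do(func() {})) // <nil>: fn queued for the writer
	close(ht.closedCh)
	fmt.Println(ht.do(func() {})) // transport is closing: buffer full, closedCh closed
}
```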
diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
similarity index 78%
rename from vendor/google.golang.org/grpc/transport/http2_client.go
rename to vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 1fdabd9..9dee6db 100644
--- a/vendor/google.golang.org/grpc/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -19,21 +19,24 @@
 package transport
 
 import (
+	"context"
+	"fmt"
 	"io"
 	"math"
 	"net"
+	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
 
-	"golang.org/x/net/context"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
 
-	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/syscall"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
@@ -72,22 +75,26 @@
 
 	isSecure bool
 
-	creds []credentials.PerRPCCredentials
+	perRPCCreds []credentials.PerRPCCredentials
 
 	// Boolean to keep track of reading activity on transport.
 	// 1 is true and 0 is false.
-	activity uint32 // Accessed atomically.
-	kp       keepalive.ClientParameters
+	activity         uint32 // Accessed atomically.
+	kp               keepalive.ClientParameters
+	keepaliveEnabled bool
 
 	statsHandler stats.Handler
 
 	initialWindowSize int32
 
+	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+	maxSendHeaderListSize *uint32
+
 	bdpEst *bdpEstimator
-	// onSuccess is a callback that client transport calls upon
+	// onPrefaceReceipt is a callback that client transport calls upon
 	// receiving server preface to signal that a successful HTTP2
 	// connection was established.
-	onSuccess func()
+	onPrefaceReceipt func()
 
 	maxConcurrentStreams  uint32
 	streamQuota           int64
@@ -106,26 +113,17 @@
 
 	// Fields below are for channelz metric collection.
 	channelzID int64 // channelz unique identification number
-	czmu       sync.RWMutex
-	kpCount    int64
-	// The number of streams that have started, including already finished ones.
-	streamsStarted int64
-	// The number of streams that have ended successfully by receiving EoS bit set
-	// frame from server.
-	streamsSucceeded  int64
-	streamsFailed     int64
-	lastStreamCreated time.Time
-	msgSent           int64
-	msgRecv           int64
-	lastMsgSent       time.Time
-	lastMsgRecv       time.Time
+	czData     *channelzData
+
+	onGoAway func(GoAwayReason)
+	onClose  func()
 }
 
 func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
 	if fn != nil {
 		return fn(ctx, addr)
 	}
-	return dialContext(ctx, "tcp", addr)
+	return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
 }
 
 func isTemporary(err error) bool {
@@ -147,7 +145,7 @@
 // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
 	scheme := "http"
 	ctx, cancel := context.WithCancel(ctx)
 	defer func() {
@@ -169,18 +167,6 @@
 			conn.Close()
 		}
 	}(conn)
-	var (
-		isSecure bool
-		authInfo credentials.AuthInfo
-	)
-	if creds := opts.TransportCredentials; creds != nil {
-		scheme = "https"
-		conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
-		if err != nil {
-			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
-		}
-		isSecure = true
-	}
 	kp := opts.KeepaliveParams
 	// Validate keepalive parameters.
 	if kp.Time == 0 {
@@ -189,19 +175,47 @@
 	if kp.Timeout == 0 {
 		kp.Timeout = defaultClientKeepaliveTimeout
 	}
+	keepaliveEnabled := false
+	if kp.Time != infinity {
+		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+		}
+		keepaliveEnabled = true
+	}
+	var (
+		isSecure bool
+		authInfo credentials.AuthInfo
+	)
+	transportCreds := opts.TransportCredentials
+	perRPCCreds := opts.PerRPCCredentials
+
+	if b := opts.CredsBundle; b != nil {
+		if t := b.TransportCredentials(); t != nil {
+			transportCreds = t
+		}
+		if t := b.PerRPCCredentials(); t != nil {
+			perRPCCreds = append(perRPCCreds, t)
+		}
+	}
+	if transportCreds != nil {
+		scheme = "https"
+		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
+		if err != nil {
+			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
+		}
+		isSecure = true
+	}
 	dynamicWindow := true
 	icwz := int32(initialWindowSize)
 	if opts.InitialConnWindowSize >= defaultWindowSize {
 		icwz = opts.InitialConnWindowSize
 		dynamicWindow = false
 	}
-	writeBufSize := defaultWriteBufSize
-	if opts.WriteBufferSize > 0 {
-		writeBufSize = opts.WriteBufferSize
-	}
-	readBufSize := defaultReadBufSize
-	if opts.ReadBufferSize > 0 {
-		readBufSize = opts.ReadBufferSize
+	writeBufSize := opts.WriteBufferSize
+	readBufSize := opts.ReadBufferSize
+	maxHeaderListSize := defaultClientMaxHeaderListSize
+	if opts.MaxHeaderListSize != nil {
+		maxHeaderListSize = *opts.MaxHeaderListSize
 	}
 	t := &http2Client{
 		ctx:                   ctx,
@@ -217,20 +231,24 @@
 		writerDone:            make(chan struct{}),
 		goAway:                make(chan struct{}),
 		awakenKeepalive:       make(chan struct{}, 1),
-		framer:                newFramer(conn, writeBufSize, readBufSize),
+		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
 		fc:                    &trInFlow{limit: uint32(icwz)},
 		scheme:                scheme,
 		activeStreams:         make(map[uint32]*Stream),
 		isSecure:              isSecure,
-		creds:                 opts.PerRPCCredentials,
+		perRPCCreds:           perRPCCreds,
 		kp:                    kp,
 		statsHandler:          opts.StatsHandler,
 		initialWindowSize:     initialWindowSize,
-		onSuccess:             onSuccess,
+		onPrefaceReceipt:      onPrefaceReceipt,
 		nextID:                1,
 		maxConcurrentStreams:  defaultMaxStreamsClient,
 		streamQuota:           defaultMaxStreamsClient,
 		streamsQuotaAvailable: make(chan struct{}, 1),
+		czData:                new(channelzData),
+		onGoAway:              onGoAway,
+		onClose:               onClose,
+		keepaliveEnabled:      keepaliveEnabled,
 	}
 	t.controlBuf = newControlBuffer(t.ctxDone)
 	if opts.InitialWindowSize >= defaultWindowSize {
@@ -257,12 +275,16 @@
 		t.statsHandler.HandleConn(t.ctx, connBegin)
 	}
 	if channelz.IsOn() {
-		t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "")
+		t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+	}
+	if t.keepaliveEnabled {
+		go t.keepalive()
 	}
 	// Start the reader goroutine for incoming message. Each transport has
 	// a dedicated goroutine which reads HTTP2 frame from network. Then it
 	// dispatches the frame to the corresponding stream entity.
 	go t.reader()
+
 	// Send connection preface to server.
 	n, err := t.conn.Write(clientPreface)
 	if err != nil {
@@ -273,14 +295,21 @@
 		t.Close()
 		return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
 	}
+	var ss []http2.Setting
+
 	if t.initialWindowSize != defaultWindowSize {
-		err = t.framer.fr.WriteSettings(http2.Setting{
+		ss = append(ss, http2.Setting{
 			ID:  http2.SettingInitialWindowSize,
 			Val: uint32(t.initialWindowSize),
 		})
-	} else {
-		err = t.framer.fr.WriteSettings()
 	}
+	if opts.MaxHeaderListSize != nil {
+		ss = append(ss, http2.Setting{
+			ID:  http2.SettingMaxHeaderListSize,
+			Val: *opts.MaxHeaderListSize,
+		})
+	}
+	err = t.framer.fr.WriteSettings(ss...)
 	if err != nil {
 		t.Close()
 		return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
@@ -292,16 +321,23 @@
 			return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
 		}
 	}
-	t.framer.writer.Flush()
+
+	if err := t.framer.writer.Flush(); err != nil {
+		return nil, err
+	}
 	go func() {
 		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
-		t.loopy.run()
-		t.conn.Close()
+		err := t.loopy.run()
+		if err != nil {
+			errorf("transport: loopyWriter.run returning. Err: %v", err)
+		}
+		// If it's a connection error, let the reader goroutine handle it
+		// since there might be data in the buffers.
+		if _, ok := err.(net.Error); !ok {
+			t.conn.Close()
+		}
 		close(t.writerDone)
 	}()
-	if t.kp.Time != infinity {
-		go t.keepalive()
-	}
 	return t, nil
 }
 
@@ -328,6 +364,9 @@
 			ctx:     s.ctx,
 			ctxDone: s.ctx.Done(),
 			recv:    s.buf,
+			closeStream: func(err error) {
+				t.CloseStream(s, err)
+			},
 		},
 		windowHandler: func(n int) {
 			t.updateWindow(s, uint32(n))
@@ -370,6 +409,9 @@
 	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
+	if callHdr.PreviousAttempts > 0 {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
+	}
 
 	if callHdr.SendCompress != "" {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
@@ -377,7 +419,7 @@
 	if dl, ok := ctx.Deadline(); ok {
 		// Send out timeout regardless its value. The server can detect timeout context by itself.
 		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
-		timeout := dl.Sub(time.Now())
+		timeout := time.Until(dl)
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
 	}
 	for k, v := range authData {
@@ -433,7 +475,7 @@
 
 func (t *http2Client) createAudience(callHdr *CallHdr) string {
 	// Create an audience string only if needed.
-	if len(t.creds) == 0 && callHdr.Creds == nil {
+	if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
 		return ""
 	}
 	// Construct URI required to get auth request metadata.
@@ -448,14 +490,14 @@
 
 func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
 	authData := map[string]string{}
-	for _, c := range t.creds {
+	for _, c := range t.perRPCCreds {
 		data, err := c.GetRequestMetadata(ctx, audience)
 		if err != nil {
 			if _, ok := status.FromError(err); ok {
 				return nil, err
 			}
 
-			return nil, streamErrorf(codes.Unauthenticated, "transport: %v", err)
+			return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
 		}
 		for k, v := range data {
 			// Capital header names are illegal in HTTP/2.
@@ -473,11 +515,11 @@
 	// options, then both sets of credentials will be applied.
 	if callCreds := callHdr.Creds; callCreds != nil {
 		if !t.isSecure && callCreds.RequireTransportSecurity() {
-			return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+			return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
 		}
 		data, err := callCreds.GetRequestMetadata(ctx, audience)
 		if err != nil {
-			return nil, streamErrorf(codes.Internal, "transport: %v", err)
+			return nil, status.Errorf(codes.Internal, "transport: %v", err)
 		}
 		for k, v := range data {
 			// Capital header names are illegal in HTTP/2
@@ -529,15 +571,13 @@
 			}
 			t.activeStreams[id] = s
 			if channelz.IsOn() {
-				t.czmu.Lock()
-				t.streamsStarted++
-				t.lastStreamCreated = time.Now()
-				t.czmu.Unlock()
+				atomic.AddInt64(&t.czData.streamsStarted, 1)
+				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
 			}
 			var sendPing bool
 			// If the number of active streams change from 0 to 1, then check if keepalive
 			// has gone dormant. If so, wake it up.
-			if len(t.activeStreams) == 1 {
+			if len(t.activeStreams) == 1 && t.keepaliveEnabled {
 				select {
 				case t.awakenKeepalive <- struct{}{}:
 					sendPing = true
@@ -581,14 +621,40 @@
 		}
 		return true
 	}
+	var hdrListSizeErr error
+	checkForHeaderListSize := func(it interface{}) bool {
+		if t.maxSendHeaderListSize == nil {
+			return true
+		}
+		hdrFrame := it.(*headerFrame)
+		var sz int64
+		for _, f := range hdrFrame.hf {
+			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
+				return false
+			}
+		}
+		return true
+	}
 	for {
-		success, err := t.controlBuf.executeAndPut(checkForStreamQuota, hdr)
+		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
+			if !checkForStreamQuota(it) {
+				return false
+			}
+			if !checkForHeaderListSize(it) {
+				return false
+			}
+			return true
+		}, hdr)
 		if err != nil {
 			return nil, err
 		}
 		if success {
 			break
 		}
+		if hdrListSizeErr != nil {
+			return nil, hdrListSizeErr
+		}
 		firstTry = false
 		select {
 		case <-ch:
@@ -624,13 +690,15 @@
 		rst = true
 		rstCode = http2.ErrCodeCancel
 	}
-	t.closeStream(s, err, rst, rstCode, nil, nil, false)
+	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
 }
 
 func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
 	// Set stream status to done.
 	if s.swapState(streamDone) == streamDone {
-		// If it was already done, return.
+		// If it was already done, return.  If multiple closeStream calls
+		// happen simultaneously, wait for the first to finish.
+		<-s.done
 		return
 	}
 	// status and trailers can be updated here without any synchronization because the stream goroutine will
@@ -644,10 +712,9 @@
 		// This will unblock reads eventually.
 		s.write(recvMsg{err: err})
 	}
-	// This will unblock write.
-	close(s.done)
 	// If headerChan isn't closed, then close it.
 	if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+		s.noHeaders = true
 		close(s.headerChan)
 	}
 	cleanup := &cleanupStream{
@@ -659,13 +726,11 @@
 			}
 			t.mu.Unlock()
 			if channelz.IsOn() {
-				t.czmu.Lock()
 				if eosReceived {
-					t.streamsSucceeded++
+					atomic.AddInt64(&t.czData.streamsSucceeded, 1)
 				} else {
-					t.streamsFailed++
+					atomic.AddInt64(&t.czData.streamsFailed, 1)
 				}
-				t.czmu.Unlock()
 			}
 		},
 		rst:     rst,
@@ -682,11 +747,17 @@
 		return true
 	}
 	t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
+	// This will unblock write.
+	close(s.done)
 }
 
 // Close kicks off the shutdown process of the transport. This should be called
 // only once on a transport. Once it is called, the transport should not be
 // accessed any more.
+//
+// This method blocks until the addrConn that initiated this transport is
+// re-connected. This happens because t.onClose() begins reconnect logic at the
+// addrConn level and blocks until the addrConn is successfully connected.
 func (t *http2Client) Close() error {
 	t.mu.Lock()
 	// Make sure we only Close once.
@@ -706,7 +777,7 @@
 	}
 	// Notify all active streams.
 	for _, s := range streams {
-		t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, nil, nil, false)
+		t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
 	}
 	if t.statsHandler != nil {
 		connEnd := &stats.ConnEnd{
@@ -714,6 +785,7 @@
 		}
 		t.statsHandler.HandleConn(t.ctx, connEnd)
 	}
+	t.onClose()
 	return err
 }
 
@@ -735,6 +807,7 @@
 	if active == 0 {
 		return t.Close()
 	}
+	t.controlBuf.put(&incomingGoAway{})
 	return nil
 }
 
@@ -899,6 +972,13 @@
 		warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
 		statusCode = codes.Unknown
 	}
+	if statusCode == codes.Canceled {
+		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
+			// Our deadline was already exceeded, and that was likely the cause
+			// of this cancelation. Alter the status code accordingly.
+			statusCode = codes.DeadlineExceeded
+		}
+	}
 	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
 }
 
@@ -908,13 +988,20 @@
 	}
 	var maxStreams *uint32
 	var ss []http2.Setting
+	var updateFuncs []func()
 	f.ForeachSetting(func(s http2.Setting) error {
-		if s.ID == http2.SettingMaxConcurrentStreams {
+		switch s.ID {
+		case http2.SettingMaxConcurrentStreams:
 			maxStreams = new(uint32)
 			*maxStreams = s.Val
-			return nil
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
 		}
-		ss = append(ss, s)
 		return nil
 	})
 	if isFirst && maxStreams == nil {
@@ -924,21 +1011,24 @@
 	sf := &incomingSettings{
 		ss: ss,
 	}
-	if maxStreams == nil {
-		t.controlBuf.put(sf)
-		return
+	if maxStreams != nil {
+		updateStreamQuota := func() {
+			delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
+			t.maxConcurrentStreams = *maxStreams
+			t.streamQuota += delta
+			if delta > 0 && t.waitingStreams > 0 {
+				close(t.streamsQuotaAvailable) // wake all of them up.
+				t.streamsQuotaAvailable = make(chan struct{}, 1)
+			}
+		}
+		updateFuncs = append(updateFuncs, updateStreamQuota)
 	}
-	updateStreamQuota := func(interface{}) bool {
-		delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
-		t.maxConcurrentStreams = *maxStreams
-		t.streamQuota += delta
-		if delta > 0 && t.waitingStreams > 0 {
-			close(t.streamsQuotaAvailable) // wake all of them up.
-			t.streamsQuotaAvailable = make(chan struct{}, 1)
+	t.controlBuf.executeAndPut(func(interface{}) bool {
+		for _, f := range updateFuncs {
+			f()
 		}
 		return true
-	}
-	t.controlBuf.executeAndPut(updateStreamQuota, sf)
+	}, sf)
 }
 
 func (t *http2Client) handlePing(f *http2.PingFrame) {
@@ -992,6 +1082,9 @@
 		close(t.goAway)
 		t.state = draining
 		t.controlBuf.put(&incomingGoAway{})
+
+		// This has to be a new goroutine because we're still using the current goroutine to read in the transport.
+		t.onGoAway(t.goAwayReason)
 	}
 	// All streams with IDs greater than the GoAwayId
 	// and smaller than the previous GoAway ID should be killed.
@@ -1047,15 +1140,27 @@
 	if !ok {
 		return
 	}
+	endStream := frame.StreamEnded()
 	atomic.StoreUint32(&s.bytesReceived, 1)
-	var state decodeState
-	if err := state.decodeResponseHeader(frame); err != nil {
-		t.closeStream(s, err, true, http2.ErrCodeProtocol, nil, nil, false)
-		// Something wrong. Stops reading even when there is remaining.
+	initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0
+
+	if !initialHeader && !endStream {
+		// As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear
+		// at the start or end of a stream. Therefore, a second HEADERS frame must have the EOS bit set.
+		st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
+		t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
 		return
 	}
 
-	endStream := frame.StreamEnded()
+	state := &decodeState{}
+	// Initialize isGRPC to !initialHeader: if a gRPC ResponseHeader has already been received,
+	// the peer is speaking gRPC and we are in gRPC mode.
+	state.data.isGRPC = !initialHeader
+	if err := state.decodeHeader(frame); err != nil {
+		t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
+		return
+	}
+
 	var isHeader bool
 	defer func() {
 		if t.statsHandler != nil {
@@ -1074,25 +1179,30 @@
 			}
 		}
 	}()
+
 	// If headers haven't been received yet.
-	if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+	if initialHeader {
 		if !endStream {
-			// Headers frame is not actually a trailers-only frame.
+			// Headers frame is ResponseHeader.
 			isHeader = true
 			// These values can be set without any synchronization because
 			// stream goroutine will read it only after seeing a closed
 			// headerChan which we'll close after setting this.
-			s.recvCompress = state.encoding
-			if len(state.mdata) > 0 {
-				s.header = state.mdata
+			s.recvCompress = state.data.encoding
+			if len(state.data.mdata) > 0 {
+				s.header = state.data.mdata
 			}
+			close(s.headerChan)
+			return
 		}
+		// Headers frame is Trailers-only.
+		s.noHeaders = true
 		close(s.headerChan)
 	}
-	if !endStream {
-		return
-	}
-	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true)
+
+	// if client received END_STREAM from server while stream was still active, send RST_STREAM
+	rst := s.getState() == streamActive
+	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
 }
 
 // reader runs as a separate goroutine in charge of reading data from network
@@ -1106,22 +1216,27 @@
 	// Check the validity of server preface.
 	frame, err := t.framer.fr.ReadFrame()
 	if err != nil {
-		t.Close()
+		t.Close() // this kicks off resetTransport, so must be last before return
 		return
 	}
-	atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+	t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
+	if t.keepaliveEnabled {
+		atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+	}
 	sf, ok := frame.(*http2.SettingsFrame)
 	if !ok {
-		t.Close()
+		t.Close() // this kicks off resetTransport, so must be last before return
 		return
 	}
-	t.onSuccess()
+	t.onPrefaceReceipt()
 	t.handleSettings(sf, true)
 
 	// loop to keep reading incoming messages on this transport.
 	for {
 		frame, err := t.framer.fr.ReadFrame()
-		atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+		if t.keepaliveEnabled {
+			atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+		}
 		if err != nil {
 			// Abort an active stream if the http2.Framer returns a
 			// http2.StreamError. This can happen only if the server's response
@@ -1132,7 +1247,9 @@
 				t.mu.Unlock()
 				if s != nil {
 					// use error detail to provide better err message
-					t.closeStream(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()), true, http2.ErrCodeProtocol, nil, nil, false)
+					code := http2ErrConvTab[se.Code]
+					msg := t.framer.fr.ErrorDetail().Error()
+					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
 				}
 				continue
 			} else {
@@ -1189,9 +1306,7 @@
 			} else {
 				t.mu.Unlock()
 				if channelz.IsOn() {
-					t.czmu.Lock()
-					t.kpCount++
-					t.czmu.Unlock()
+					atomic.AddInt64(&t.czData.kpCount, 1)
 				}
 				// Send ping.
 				t.controlBuf.put(p)
@@ -1231,41 +1346,39 @@
 }
 
 func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
-	t.czmu.RLock()
 	s := channelz.SocketInternalMetric{
-		StreamsStarted:                  t.streamsStarted,
-		StreamsSucceeded:                t.streamsSucceeded,
-		StreamsFailed:                   t.streamsFailed,
-		MessagesSent:                    t.msgSent,
-		MessagesReceived:                t.msgRecv,
-		KeepAlivesSent:                  t.kpCount,
-		LastLocalStreamCreatedTimestamp: t.lastStreamCreated,
-		LastMessageSentTimestamp:        t.lastMsgSent,
-		LastMessageReceivedTimestamp:    t.lastMsgRecv,
+		StreamsStarted:                  atomic.LoadInt64(&t.czData.streamsStarted),
+		StreamsSucceeded:                atomic.LoadInt64(&t.czData.streamsSucceeded),
+		StreamsFailed:                   atomic.LoadInt64(&t.czData.streamsFailed),
+		MessagesSent:                    atomic.LoadInt64(&t.czData.msgSent),
+		MessagesReceived:                atomic.LoadInt64(&t.czData.msgRecv),
+		KeepAlivesSent:                  atomic.LoadInt64(&t.czData.kpCount),
+		LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+		LastMessageSentTimestamp:        time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+		LastMessageReceivedTimestamp:    time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
 		LocalFlowControlWindow:          int64(t.fc.getSize()),
-		//socket options
-		LocalAddr:  t.localAddr,
-		RemoteAddr: t.remoteAddr,
-		// Security
+		SocketOptions:                   channelz.GetSocketOption(t.conn),
+		LocalAddr:                       t.localAddr,
+		RemoteAddr:                      t.remoteAddr,
 		// RemoteName :
 	}
-	t.czmu.RUnlock()
+	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+		s.Security = au.GetSecurityValue()
+	}
 	s.RemoteFlowControlWindow = t.getOutFlowWindow()
 	return &s
 }
 
+func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
+
 func (t *http2Client) IncrMsgSent() {
-	t.czmu.Lock()
-	t.msgSent++
-	t.lastMsgSent = time.Now()
-	t.czmu.Unlock()
+	atomic.AddInt64(&t.czData.msgSent, 1)
+	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
 }
 
 func (t *http2Client) IncrMsgRecv() {
-	t.czmu.Lock()
-	t.msgRecv++
-	t.lastMsgRecv = time.Now()
-	t.czmu.Unlock()
+	atomic.AddInt64(&t.czData.msgRecv, 1)
+	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
 }
 
 func (t *http2Client) getOutFlowWindow() int64 {
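
The http2_client.go changes above drop the czmu mutex and replace the individual channelz counters with a single czData struct whose fields are updated via sync/atomic, so the hot path never takes a lock just to bump a metric. A minimal sketch of that pattern (the transport/czData shapes below are trimmed-down stand-ins, not the full gRPC structs):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// czData holds metric counters that are only ever touched atomically.
type czData struct {
	streamsStarted        int64
	lastStreamCreatedTime int64 // UnixNano, stored atomically
}

type transport struct {
	czData *czData
}

// onStreamCreated bumps the counters without holding any lock.
func (t *transport) onStreamCreated() {
	atomic.AddInt64(&t.czData.streamsStarted, 1)
	atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
}

// snapshot reads the counters the same way the new ChannelzMetric does: atomic loads.
func (t *transport) snapshot() (started int64, last time.Time) {
	return atomic.LoadInt64(&t.czData.streamsStarted),
		time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime))
}

func main() {
	tr := &transport{czData: new(czData)}
	tr.onStreamCreated()
	n, last := tr.snapshot()
	fmt.Println(n, !last.IsZero()) // 1 true
}
```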
diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
similarity index 78%
rename from vendor/google.golang.org/grpc/transport/http2_server.go
rename to vendor/google.golang.org/grpc/internal/transport/http2_server.go
index ab35618..435092e 100644
--- a/vendor/google.golang.org/grpc/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -20,11 +20,11 @@
 
 import (
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"io"
 	"math"
-	"math/rand"
 	"net"
 	"strconv"
 	"sync"
@@ -32,13 +32,14 @@
 	"time"
 
 	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
 
-	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
@@ -47,9 +48,14 @@
 	"google.golang.org/grpc/tap"
 )
 
-// ErrIllegalHeaderWrite indicates that setting header is illegal because of
-// the stream's state.
-var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+var (
+	// ErrIllegalHeaderWrite indicates that setting header is illegal because of
+	// the stream's state.
+	ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+	// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
+	// than the limit set by peer.
+	ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
+)
 
 // http2Server implements the ServerTransport interface with HTTP2.
 type http2Server struct {
@@ -88,9 +94,10 @@
 	// Flag to signify that number of ping strikes should be reset to 0.
 	// This is set whenever data or header frames are sent.
 	// 1 means yes.
-	resetPingStrikes  uint32 // Accessed atomically.
-	initialWindowSize int32
-	bdpEst            *bdpEstimator
+	resetPingStrikes      uint32 // Accessed atomically.
+	initialWindowSize     int32
+	bdpEst                *bdpEstimator
+	maxSendHeaderListSize *uint32
 
 	mu sync.Mutex // guard the following
 
@@ -111,33 +118,19 @@
 
 	// Fields below are for channelz metric collection.
 	channelzID int64 // channelz unique identification number
-	czmu       sync.RWMutex
-	kpCount    int64
-	// The number of streams that have started, including already finished ones.
-	streamsStarted int64
-	// The number of streams that have ended successfully by sending frame with
-	// EoS bit set.
-	streamsSucceeded  int64
-	streamsFailed     int64
-	lastStreamCreated time.Time
-	msgSent           int64
-	msgRecv           int64
-	lastMsgSent       time.Time
-	lastMsgRecv       time.Time
+	czData     *channelzData
 }
 
 // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
 // returned if something goes wrong.
 func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
-	writeBufSize := defaultWriteBufSize
-	if config.WriteBufferSize > 0 {
-		writeBufSize = config.WriteBufferSize
+	writeBufSize := config.WriteBufferSize
+	readBufSize := config.ReadBufferSize
+	maxHeaderListSize := defaultServerMaxHeaderListSize
+	if config.MaxHeaderListSize != nil {
+		maxHeaderListSize = *config.MaxHeaderListSize
 	}
-	readBufSize := defaultReadBufSize
-	if config.ReadBufferSize > 0 {
-		readBufSize = config.ReadBufferSize
-	}
-	framer := newFramer(conn, writeBufSize, readBufSize)
+	framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
 	// Send initial settings as connection preface to client.
 	var isettings []http2.Setting
 	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
@@ -167,6 +160,12 @@
 			ID:  http2.SettingInitialWindowSize,
 			Val: uint32(iwz)})
 	}
+	if config.MaxHeaderListSize != nil {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingMaxHeaderListSize,
+			Val: *config.MaxHeaderListSize,
+		})
+	}
 	if err := framer.fr.WriteSettings(isettings...); err != nil {
 		return nil, connectionErrorf(false, err, "transport: %v", err)
 	}
@@ -220,6 +219,7 @@
 		idle:              time.Now(),
 		kep:               kep,
 		initialWindowSize: iwz,
+		czData:            new(channelzData),
 	}
 	t.controlBuf = newControlBuffer(t.ctxDone)
 	if dynamicWindow {
@@ -237,7 +237,7 @@
 		t.stats.HandleConn(t.ctx, connBegin)
 	}
 	if channelz.IsOn() {
-		t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "")
+		t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
 	}
 	t.framer.writer.Flush()
 
@@ -273,7 +273,9 @@
 	go func() {
 		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
 		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
-		t.loopy.run()
+		if err := t.loopy.run(); err != nil {
+			errorf("transport: loopyWriter.run returning. Err: %v", err)
+		}
 		t.conn.Close()
 		close(t.writerDone)
 	}()
@@ -282,21 +284,21 @@
 }
 
 // operateHeader takes action on the decoded headers.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
 	streamID := frame.Header().StreamID
-	var state decodeState
-	for _, hf := range frame.Fields {
-		if err := state.processHeaderField(hf); err != nil {
-			if se, ok := err.(StreamError); ok {
-				t.controlBuf.put(&cleanupStream{
-					streamID: streamID,
-					rst:      true,
-					rstCode:  statusCodeConvTab[se.Code],
-					onWrite:  func() {},
-				})
-			}
-			return
+	state := &decodeState{
+		serverSide: true,
+	}
+	if err := state.decodeHeader(frame); err != nil {
+		if se, ok := status.FromError(err); ok {
+			t.controlBuf.put(&cleanupStream{
+				streamID: streamID,
+				rst:      true,
+				rstCode:  statusCodeConvTab[se.Code()],
+				onWrite:  func() {},
+			})
 		}
+		return false
 	}
 
 	buf := newRecvBuffer()
@@ -305,16 +307,16 @@
 		st:             t,
 		buf:            buf,
 		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
-		recvCompress:   state.encoding,
-		method:         state.method,
-		contentSubtype: state.contentSubtype,
+		recvCompress:   state.data.encoding,
+		method:         state.data.method,
+		contentSubtype: state.data.contentSubtype,
 	}
 	if frame.StreamEnded() {
 		// s is just created by the caller. No lock needed.
 		s.state = streamReadDone
 	}
-	if state.timeoutSet {
-		s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
+	if state.data.timeoutSet {
+		s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
 	} else {
 		s.ctx, s.cancel = context.WithCancel(t.ctx)
 	}
@@ -327,19 +329,19 @@
 	}
 	s.ctx = peer.NewContext(s.ctx, pr)
 	// Attach the received metadata to the context.
-	if len(state.mdata) > 0 {
-		s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
+	if len(state.data.mdata) > 0 {
+		s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
 	}
-	if state.statsTags != nil {
-		s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags)
+	if state.data.statsTags != nil {
+		s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
 	}
-	if state.statsTrace != nil {
-		s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace)
+	if state.data.statsTrace != nil {
+		s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
 	}
 	if t.inTapHandle != nil {
 		var err error
 		info := &tap.Info{
-			FullMethodName: state.method,
+			FullMethodName: state.data.method,
 		}
 		s.ctx, err = t.inTapHandle(s.ctx, info)
 		if err != nil {
@@ -350,13 +352,13 @@
 				rstCode:  http2.ErrCodeRefusedStream,
 				onWrite:  func() {},
 			})
-			return
+			return false
 		}
 	}
 	t.mu.Lock()
 	if t.state != reachable {
 		t.mu.Unlock()
-		return
+		return false
 	}
 	if uint32(len(t.activeStreams)) >= t.maxStreams {
 		t.mu.Unlock()
@@ -366,7 +368,7 @@
 			rstCode:  http2.ErrCodeRefusedStream,
 			onWrite:  func() {},
 		})
-		return
+		return false
 	}
 	if streamID%2 != 1 || streamID <= t.maxStreamID {
 		t.mu.Unlock()
@@ -381,10 +383,8 @@
 	}
 	t.mu.Unlock()
 	if channelz.IsOn() {
-		t.czmu.Lock()
-		t.streamsStarted++
-		t.lastStreamCreated = time.Now()
-		t.czmu.Unlock()
+		atomic.AddInt64(&t.czData.streamsStarted, 1)
+		atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
 	}
 	s.requestRead = func(n int) {
 		t.adjustWindow(s, uint32(n))
@@ -413,8 +413,13 @@
 			t.updateWindow(s, uint32(n))
 		},
 	}
+	// Register the stream with loopy.
+	t.controlBuf.put(&registerStream{
+		streamID: s.id,
+		wq:       s.wq,
+	})
 	handle(s)
-	return
+	return false
 }
 
 // HandleStreams receives incoming streams using the given handler. This is
@@ -432,7 +437,7 @@
 				s := t.activeStreams[se.StreamID]
 				t.mu.Unlock()
 				if s != nil {
-					t.closeStream(s, true, se.Code, nil, false)
+					t.closeStream(s, true, se.Code, false)
 				} else {
 					t.controlBuf.put(&cleanupStream{
 						streamID: se.StreamID,
@@ -574,7 +579,7 @@
 	}
 	if size > 0 {
 		if err := s.fc.onData(size); err != nil {
-			t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false)
+			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
 			return
 		}
 		if f.Header().Flags.Has(http2.FlagDataPadded) {
@@ -599,11 +604,18 @@
 }
 
 func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
-	s, ok := t.getStream(f)
-	if !ok {
+	// If the stream is not deleted from the transport's active streams map, then do a regular close stream.
+	if s, ok := t.getStream(f); ok {
+		t.closeStream(s, false, 0, false)
 		return
 	}
-	t.closeStream(s, false, 0, nil, false)
+	// If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map.
+	t.controlBuf.put(&cleanupStream{
+		streamID: f.Header().StreamID,
+		rst:      false,
+		rstCode:  0,
+		onWrite:  func() {},
+	})
 }
 
 func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
@@ -611,11 +623,25 @@
 		return
 	}
 	var ss []http2.Setting
+	var updateFuncs []func()
 	f.ForeachSetting(func(s http2.Setting) error {
-		ss = append(ss, s)
+		switch s.ID {
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
+		}
 		return nil
 	})
-	t.controlBuf.put(&incomingSettings{
+	t.controlBuf.executeAndPut(func(interface{}) bool {
+		for _, f := range updateFuncs {
+			f()
+		}
+		return true
+	}, &incomingSettings{
 		ss: ss,
 	})
 }
@@ -695,6 +721,21 @@
 	return headerFields
 }
 
+func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			return false
+		}
+	}
+	return true
+}
+
 // WriteHeader sends the header metadata md back to the client.
 func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 	if s.updateHeaderSent() || s.getState() == streamDone {
@@ -708,12 +749,15 @@
 			s.header = md
 		}
 	}
-	t.writeHeaderLocked(s)
+	if err := t.writeHeaderLocked(s); err != nil {
+		s.hdrMu.Unlock()
+		return err
+	}
 	s.hdrMu.Unlock()
 	return nil
 }
 
-func (t *http2Server) writeHeaderLocked(s *Stream) {
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
 	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
@@ -723,21 +767,28 @@
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
 	}
 	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
-	t.controlBuf.put(&headerFrame{
+	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
 		streamID:  s.id,
 		hf:        headerFields,
 		endStream: false,
 		onWrite: func() {
 			atomic.StoreUint32(&t.resetPingStrikes, 1)
 		},
-		wq: s.wq,
 	})
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
 	if t.stats != nil {
 		// Note: WireLength is not set in outHeader.
 		// TODO(mmukhi): Revisit this later, if needed.
 		outHeader := &stats.OutHeader{}
 		t.stats.HandleRPC(s.Context(), outHeader)
 	}
+	return nil
 }
 
 // WriteStatus sends stream status to the client and terminates the stream.
@@ -754,7 +805,10 @@
 	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
 	if !s.updateHeaderSent() {                      // No headers have been sent.
 		if len(s.header) > 0 { // Send a separate header frame.
-			t.writeHeaderLocked(s)
+			if err := t.writeHeaderLocked(s); err != nil {
+				s.hdrMu.Unlock()
+				return err
+			}
 		} else { // Send a trailer only response.
 			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
 			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
@@ -767,10 +821,10 @@
 		stBytes, err := proto.Marshal(p)
 		if err != nil {
 			// TODO: return error instead, when callers are able to handle it.
-			panic(err)
+			grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
 		}
-
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
 	}
 
 	// Attach the trailer metadata.
@@ -784,7 +838,17 @@
 		},
 	}
 	s.hdrMu.Unlock()
-	t.closeStream(s, false, 0, trailingHeader, true)
+	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
 	if t.stats != nil {
 		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
 	}
@@ -796,8 +860,11 @@
 func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
 	if !s.isHeaderSent() { // Headers haven't been written yet.
 		if err := t.WriteHeader(s, nil); err != nil {
+			if _, ok := err.(ConnectionError); ok {
+				return err
+			}
 			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
-			return streamErrorf(codes.Internal, "transport: %v", err)
+			return status.Errorf(codes.Internal, "transport: %v", err)
 		}
 	} else {
 		// Writing headers checks for this condition.
@@ -911,9 +978,7 @@
 			}
 			pingSent = true
 			if channelz.IsOn() {
-				t.czmu.Lock()
-				t.kpCount++
-				t.czmu.Unlock()
+				atomic.AddInt64(&t.czData.kpCount, 1)
 			}
 			t.controlBuf.put(p)
 			keepalive.Reset(t.kp.Timeout)
@@ -953,47 +1018,65 @@
 	return err
 }
 
-// closeStream clears the footprint of a stream when the stream is not needed
-// any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
-	if s.swapState(streamDone) == streamDone {
+// deleteStream deletes the stream s from transport's active streams.
+func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) {
+	oldState = s.swapState(streamDone)
+	if oldState == streamDone {
 		// If the stream was already done, return.
-		return
+		return oldState
 	}
+
 	// In case stream sending and receiving are invoked in separate
 	// goroutines (e.g., bi-directional streaming), cancel needs to be
 	// called to interrupt the potential blocking on other goroutines.
 	s.cancel()
-	cleanup := &cleanupStream{
+
+	t.mu.Lock()
+	if _, ok := t.activeStreams[s.id]; ok {
+		delete(t.activeStreams, s.id)
+		if len(t.activeStreams) == 0 {
+			t.idle = time.Now()
+		}
+	}
+	t.mu.Unlock()
+
+	if channelz.IsOn() {
+		if eosReceived {
+			atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+		} else {
+			atomic.AddInt64(&t.czData.streamsFailed, 1)
+		}
+	}
+
+	return oldState
+}
+
+// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
+func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+	oldState := t.deleteStream(s, eosReceived)
+	// If the stream is already closed, then don't put trailing header to controlbuf.
+	if oldState == streamDone {
+		return
+	}
+
+	hdr.cleanup = &cleanupStream{
 		streamID: s.id,
 		rst:      rst,
 		rstCode:  rstCode,
-		onWrite: func() {
-			t.mu.Lock()
-			if t.activeStreams != nil {
-				delete(t.activeStreams, s.id)
-				if len(t.activeStreams) == 0 {
-					t.idle = time.Now()
-				}
-			}
-			t.mu.Unlock()
-			if channelz.IsOn() {
-				t.czmu.Lock()
-				if eosReceived {
-					t.streamsSucceeded++
-				} else {
-					t.streamsFailed++
-				}
-				t.czmu.Unlock()
-			}
-		},
+		onWrite:  func() {},
 	}
-	if hdr != nil {
-		hdr.cleanup = cleanup
-		t.controlBuf.put(hdr)
-	} else {
-		t.controlBuf.put(cleanup)
-	}
+	t.controlBuf.put(hdr)
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed any more.
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+	t.deleteStream(s, eosReceived)
+	t.controlBuf.put(&cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite:  func() {},
+	})
 }
 
 func (t *http2Server) RemoteAddr() net.Addr {
@@ -1072,45 +1155,41 @@
 }
 
 func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
-	t.czmu.RLock()
 	s := channelz.SocketInternalMetric{
-		StreamsStarted:                   t.streamsStarted,
-		StreamsSucceeded:                 t.streamsSucceeded,
-		StreamsFailed:                    t.streamsFailed,
-		MessagesSent:                     t.msgSent,
-		MessagesReceived:                 t.msgRecv,
-		KeepAlivesSent:                   t.kpCount,
-		LastRemoteStreamCreatedTimestamp: t.lastStreamCreated,
-		LastMessageSentTimestamp:         t.lastMsgSent,
-		LastMessageReceivedTimestamp:     t.lastMsgRecv,
+		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
+		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
+		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
+		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
+		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
+		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
+		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
 		LocalFlowControlWindow:           int64(t.fc.getSize()),
-		//socket options
-		LocalAddr:  t.localAddr,
-		RemoteAddr: t.remoteAddr,
-		// Security
+		SocketOptions:                    channelz.GetSocketOption(t.conn),
+		LocalAddr:                        t.localAddr,
+		RemoteAddr:                       t.remoteAddr,
 		// RemoteName :
 	}
-	t.czmu.RUnlock()
+	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+		s.Security = au.GetSecurityValue()
+	}
 	s.RemoteFlowControlWindow = t.getOutFlowWindow()
 	return &s
 }
 
 func (t *http2Server) IncrMsgSent() {
-	t.czmu.Lock()
-	t.msgSent++
-	t.lastMsgSent = time.Now()
-	t.czmu.Unlock()
+	atomic.AddInt64(&t.czData.msgSent, 1)
+	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
 }
 
 func (t *http2Server) IncrMsgRecv() {
-	t.czmu.Lock()
-	t.msgRecv++
-	t.lastMsgRecv = time.Now()
-	t.czmu.Unlock()
+	atomic.AddInt64(&t.czData.msgRecv, 1)
+	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
 }
 
 func (t *http2Server) getOutFlowWindow() int64 {
-	resp := make(chan uint32)
+	resp := make(chan uint32, 1)
 	timer := time.NewTimer(time.Second)
 	defer timer.Stop()
 	t.controlBuf.put(&outFlowControlSizeRequest{resp})
@@ -1124,14 +1203,12 @@
 	}
 }
 
-var rgen = rand.New(rand.NewSource(time.Now().UnixNano()))
-
 func getJitter(v time.Duration) time.Duration {
 	if v == infinity {
 		return 0
 	}
 	// Generate a jitter between +/- 10% of the value.
 	r := int64(v / 10)
-	j := rgen.Int63n(2*r) - r
+	j := grpcrand.Int63n(2*r) - r
 	return time.Duration(j)
 }
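
The new checkForHeaderListSize helper above sums the HPACK-defined size of each header field and refuses to send a headers frame once the total exceeds the limit the peer advertised via SETTINGS_MAX_HEADER_LIST_SIZE. A minimal standalone sketch of the same check (headerListFits is an invented name, not part of the gRPC API):

```go
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// headerListFits reports whether fields fit under the peer's advertised limit.
// A nil limit means the peer set no SETTINGS_MAX_HEADER_LIST_SIZE, so anything fits.
func headerListFits(fields []hpack.HeaderField, limit *uint32) bool {
	if limit == nil {
		return true
	}
	var sz int64
	for _, f := range fields {
		// f.Size() is the HPACK size: len(name) + len(value) + 32 overhead bytes.
		if sz += int64(f.Size()); sz > int64(*limit) {
			return false
		}
	}
	return true
}

func main() {
	fields := []hpack.HeaderField{
		{Name: ":status", Value: "200"},
		{Name: "content-type", Value: "application/grpc"},
	}
	limit := uint32(16)
	fmt.Println(headerListFits(fields, nil))    // true: no limit advertised
	fmt.Println(headerListFits(fields, &limit)) // false: 42 bytes already exceed 16
}
```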
diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
similarity index 66%
rename from vendor/google.golang.org/grpc/transport/http_util.go
rename to vendor/google.golang.org/grpc/internal/transport/http_util.go
index 835c812..9d21286 100644
--- a/vendor/google.golang.org/grpc/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -23,11 +23,14 @@
 	"bytes"
 	"encoding/base64"
 	"fmt"
+	"io"
+	"math"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
+	"unicode/utf8"
 
 	"github.com/golang/protobuf/proto"
 	"golang.org/x/net/http2"
@@ -42,9 +45,6 @@
 	http2MaxFrameLen = 16384 // 16KB frame
 	// http://http2.github.io/http2-spec/#SettingValues
 	http2InitHeaderTableSize = 4096
-	// http2IOBufSize specifies the buffer size for sending frames.
-	defaultWriteBufSize = 32 * 1024
-	defaultReadBufSize  = 32 * 1024
 	// baseContentType is the base content-type for gRPC.  This is a valid
 	// content-type on it's own, but can also include a content-subtype such as
 	// "proto" as a suffix after "+" or ";".  See
@@ -78,7 +78,8 @@
 		codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
 		codes.PermissionDenied:  http2.ErrCodeInadequateSecurity,
 	}
-	httpStatusConvTab = map[int]codes.Code{
+	// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
+	HTTPStatusConvTab = map[int]codes.Code{
 		// 400 Bad Request - INTERNAL.
 		http.StatusBadRequest: codes.Internal,
 		// 401 Unauthorized  - UNAUTHENTICATED.
@@ -98,9 +99,7 @@
 	}
 )
 
-// Records the states during HPACK decoding. Must be reset once the
-// decoding of the entire headers are finished.
-type decodeState struct {
+type parsedHeaderData struct {
 	encoding string
 	// statusGen caches the stream status received from the trailer the server
 	// sent.  Client side only.  Do not access directly.  After all trailers are
@@ -120,6 +119,30 @@
 	statsTags      []byte
 	statsTrace     []byte
 	contentSubtype string
+
+	// isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
+	//
+	// We are in gRPC mode (peer speaking gRPC) if:
+	// 	* We are on the client side and have already received a HEADERS frame that indicates a gRPC peer.
+	//  * The header contains a valid content-type, i.e. a string that starts with "application/grpc".
+	// In gRPC mode we should handle errors specific to gRPC.
+	//
+	// Otherwise (i.e. the content-type does not start with "application/grpc", or is missing), we
+	// are in HTTP fallback mode, and should handle errors specific to HTTP.
+	isGRPC         bool
+	grpcErr        error
+	httpErr        error
+	contentTypeErr string
+}
+
+// decodeState configures decoding criteria and records the decoded data.
+type decodeState struct {
+	// whether decoding on server side or not
+	serverSide bool
+
+	// Records the state during HPACK decoding. It is filled with info parsed from the HTTP HEADERS
+	// frame once the decodeHeader function has been invoked and has returned.
+	data parsedHeaderData
 }
 
 // isReservedHeader checks whether hdr belongs to HTTP2 headers
@@ -138,6 +161,9 @@
 		"grpc-status",
 		"grpc-timeout",
 		"grpc-status-details-bin",
+		// Intentionally exclude grpc-previous-rpc-attempts and
+		// grpc-retry-pushback-ms, which are "reserved", but their API
+		// intentionally works via metadata.
 		"te":
 		return true
 	default:
@@ -145,8 +171,8 @@
 	}
 }
 
-// isWhitelistedHeader checks whether hdr should be propagated
-// into metadata visible to users.
+// isWhitelistedHeader checks whether hdr should be propagated into metadata
+// visible to users, even though it is classified as "reserved", above.
 func isWhitelistedHeader(hdr string) bool {
 	switch hdr {
 	case ":authority", "user-agent":
@@ -197,11 +223,11 @@
 }
 
 func (d *decodeState) status() *status.Status {
-	if d.statusGen == nil {
+	if d.data.statusGen == nil {
 		// No status-details were provided; generate status using code/msg.
-		d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
+		d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
 	}
-	return d.statusGen
+	return d.data.statusGen
 }
 
 const binHdrSuffix = "-bin"
@@ -233,111 +259,152 @@
 	return v, nil
 }
 
-func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error {
-	for _, hf := range frame.Fields {
-		if err := d.processHeaderField(hf); err != nil {
-			return err
-		}
+func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
+	// frame.Truncated is set to true when framer detects that the current header
+	// list size hits MaxHeaderListSize limit.
+	if frame.Truncated {
+		return status.Error(codes.Internal, "peer header list size exceeded limit")
 	}
 
-	// If grpc status exists, no need to check further.
-	if d.rawStatusCode != nil || d.statusGen != nil {
+	for _, hf := range frame.Fields {
+		d.processHeaderField(hf)
+	}
+
+	if d.data.isGRPC {
+		if d.data.grpcErr != nil {
+			return d.data.grpcErr
+		}
+		if d.serverSide {
+			return nil
+		}
+		if d.data.rawStatusCode == nil && d.data.statusGen == nil {
+			// gRPC status doesn't exist.
+			// Set rawStatusCode to be unknown and return nil error.
+			// That way, if the stream has ended, this Unknown status
+			// will be propagated to the user.
+			// Otherwise it will be ignored, and the status from a later
+			// trailer that has the StreamEnded flag set is propagated.
+			code := int(codes.Unknown)
+			d.data.rawStatusCode = &code
+		}
 		return nil
 	}
 
-	// If grpc status doesn't exist and http status doesn't exist,
-	// then it's a malformed header.
-	if d.httpStatus == nil {
-		return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
+	// HTTP fallback mode
+	if d.data.httpErr != nil {
+		return d.data.httpErr
 	}
 
-	if *(d.httpStatus) != http.StatusOK {
-		code, ok := httpStatusConvTab[*(d.httpStatus)]
+	var (
+		code = codes.Internal // when header does not include HTTP status, return INTERNAL
+		ok   bool
+	)
+
+	if d.data.httpStatus != nil {
+		code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
 		if !ok {
 			code = codes.Unknown
 		}
-		return streamErrorf(code, http.StatusText(*(d.httpStatus)))
 	}
 
-	// gRPC status doesn't exist and http status is OK.
-	// Set rawStatusCode to be unknown and return nil error.
-	// So that, if the stream has ended this Unknown status
-	// will be propagated to the user.
-	// Otherwise, it will be ignored. In which case, status from
-	// a later trailer, that has StreamEnded flag set, is propagated.
-	code := int(codes.Unknown)
-	d.rawStatusCode = &code
-	return nil
+	return status.Error(code, d.constructHTTPErrMsg())
+}
 
+// constructHTTPErrMsg constructs the error message to be returned in HTTP fallback mode.
+// Format: HTTP status code and its corresponding message + content-type error message.
+func (d *decodeState) constructHTTPErrMsg() string {
+	var errMsgs []string
+
+	if d.data.httpStatus == nil {
+		errMsgs = append(errMsgs, "malformed header: missing HTTP status")
+	} else {
+		errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
+	}
+
+	if d.data.contentTypeErr == "" {
+		errMsgs = append(errMsgs, "transport: missing content-type field")
+	} else {
+		errMsgs = append(errMsgs, d.data.contentTypeErr)
+	}
+
+	return strings.Join(errMsgs, "; ")
 }
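
For context, a quick illustration of the message shape the fallback path above produces; the snippet below simply inlines the same joining logic for a hypothetical 404 response with no usable content-type, it is not the unexported helper itself.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	httpStatus := http.StatusNotFound
	contentTypeErr := "" // no content-type error was recorded for this response

	var errMsgs []string
	errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(httpStatus), httpStatus))
	if contentTypeErr == "" {
		errMsgs = append(errMsgs, "transport: missing content-type field")
	} else {
		errMsgs = append(errMsgs, contentTypeErr)
	}
	// Prints: Not Found: HTTP status code 404; transport: missing content-type field
	fmt.Println(strings.Join(errMsgs, "; "))
}
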
 
 func (d *decodeState) addMetadata(k, v string) {
-	if d.mdata == nil {
-		d.mdata = make(map[string][]string)
+	if d.data.mdata == nil {
+		d.data.mdata = make(map[string][]string)
 	}
-	d.mdata[k] = append(d.mdata[k], v)
+	d.data.mdata[k] = append(d.data.mdata[k], v)
 }
 
-func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
+func (d *decodeState) processHeaderField(f hpack.HeaderField) {
 	switch f.Name {
 	case "content-type":
 		contentSubtype, validContentType := contentSubtype(f.Value)
 		if !validContentType {
-			return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
+			d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
+			return
 		}
-		d.contentSubtype = contentSubtype
+		d.data.contentSubtype = contentSubtype
 		// TODO: do we want to propagate the whole content-type in the metadata,
 		// or come up with a way to just propagate the content-subtype if it was set?
 		// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
 		// in the metadata?
 		d.addMetadata(f.Name, f.Value)
+		d.data.isGRPC = true
 	case "grpc-encoding":
-		d.encoding = f.Value
+		d.data.encoding = f.Value
 	case "grpc-status":
 		code, err := strconv.Atoi(f.Value)
 		if err != nil {
-			return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
+			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
+			return
 		}
-		d.rawStatusCode = &code
+		d.data.rawStatusCode = &code
 	case "grpc-message":
-		d.rawStatusMsg = decodeGrpcMessage(f.Value)
+		d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
 	case "grpc-status-details-bin":
 		v, err := decodeBinHeader(f.Value)
 		if err != nil {
-			return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
+			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
+			return
 		}
 		s := &spb.Status{}
 		if err := proto.Unmarshal(v, s); err != nil {
-			return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
+			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
+			return
 		}
-		d.statusGen = status.FromProto(s)
+		d.data.statusGen = status.FromProto(s)
 	case "grpc-timeout":
-		d.timeoutSet = true
+		d.data.timeoutSet = true
 		var err error
-		if d.timeout, err = decodeTimeout(f.Value); err != nil {
-			return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
+		if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
+			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
 		}
 	case ":path":
-		d.method = f.Value
+		d.data.method = f.Value
 	case ":status":
 		code, err := strconv.Atoi(f.Value)
 		if err != nil {
-			return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err)
+			d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
+			return
 		}
-		d.httpStatus = &code
+		d.data.httpStatus = &code
 	case "grpc-tags-bin":
 		v, err := decodeBinHeader(f.Value)
 		if err != nil {
-			return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
+			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
+			return
 		}
-		d.statsTags = v
+		d.data.statsTags = v
 		d.addMetadata(f.Name, string(v))
 	case "grpc-trace-bin":
 		v, err := decodeBinHeader(f.Value)
 		if err != nil {
-			return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
+			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
+			return
 		}
-		d.statsTrace = v
+		d.data.statsTrace = v
 		d.addMetadata(f.Name, string(v))
 	default:
 		if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
@@ -346,11 +413,10 @@
 		v, err := decodeMetadataHeader(f.Name, f.Value)
 		if err != nil {
 			errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
-			return nil
+			return
 		}
 		d.addMetadata(f.Name, v)
 	}
-	return nil
 }
 
 type timeoutUnit uint8
@@ -423,6 +489,10 @@
 	if size < 2 {
 		return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
 	}
+	if size > 9 {
+		// Spec allows for 8 digits plus the unit.
+		return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
+	}
 	unit := timeoutUnit(s[size-1])
 	d, ok := timeoutUnitToDuration(unit)
 	if !ok {
@@ -432,21 +502,27 @@
 	if err != nil {
 		return 0, err
 	}
+	const maxHours = math.MaxInt64 / int64(time.Hour)
+	if d == time.Hour && t > maxHours {
+		// This timeout would overflow math.MaxInt64; clamp it.
+		return time.Duration(math.MaxInt64), nil
+	}
 	return d * time.Duration(t), nil
 }
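
A standalone sketch of the grpc-timeout wire format the function above handles, assuming the unit characters defined by the gRPC HTTP/2 spec (H, M, S, m, u, n). parseGrpcTimeout is an illustrative stand-in for the unexported decodeTimeout, including the new length cap and the hour-overflow clamp.

package main

import (
	"fmt"
	"math"
	"strconv"
	"time"
)

// parseGrpcTimeout accepts at most eight digits plus a one-byte unit and
// clamps the hour case so the product cannot overflow int64 nanoseconds.
func parseGrpcTimeout(s string) (time.Duration, error) {
	if len(s) < 2 || len(s) > 9 {
		return 0, fmt.Errorf("invalid timeout %q", s)
	}
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	d, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("unknown unit in %q", s)
	}
	t, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	if d == time.Hour && t > math.MaxInt64/int64(time.Hour) {
		return time.Duration(math.MaxInt64), nil // clamp instead of overflowing
	}
	return d * time.Duration(t), nil
}

func main() {
	fmt.Println(parseGrpcTimeout("100m"))      // 100ms
	fmt.Println(parseGrpcTimeout("99999999H")) // clamped to roughly 292 years
}
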
 
 const (
 	spaceByte   = ' '
-	tildaByte   = '~'
+	tildeByte   = '~'
 	percentByte = '%'
 )
 
 // encodeGrpcMessage is used to encode status code in header field
-// "grpc-message".
-// It checks to see if each individual byte in msg is an
-// allowable byte, and then either percent encoding or passing it through.
-// When percent encoding, the byte is converted into hexadecimal notation
-// with a '%' prepended.
+// "grpc-message". It does percent encoding and also replaces invalid utf-8
+// characters with Unicode replacement character.
+//
+// It checks to see if each individual byte in msg is an allowable byte, and
+// then either percent encoding or passing it through. When percent encoding,
+// the byte is converted into hexadecimal notation with a '%' prepended.
 func encodeGrpcMessage(msg string) string {
 	if msg == "" {
 		return ""
@@ -454,7 +530,7 @@
 	lenMsg := len(msg)
 	for i := 0; i < lenMsg; i++ {
 		c := msg[i]
-		if !(c >= spaceByte && c < tildaByte && c != percentByte) {
+		if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
 			return encodeGrpcMessageUnchecked(msg)
 		}
 	}
@@ -463,14 +539,26 @@
 
 func encodeGrpcMessageUnchecked(msg string) string {
 	var buf bytes.Buffer
-	lenMsg := len(msg)
-	for i := 0; i < lenMsg; i++ {
-		c := msg[i]
-		if c >= spaceByte && c < tildaByte && c != percentByte {
-			buf.WriteByte(c)
-		} else {
-			buf.WriteString(fmt.Sprintf("%%%02X", c))
+	for len(msg) > 0 {
+		r, size := utf8.DecodeRuneInString(msg)
+		for _, b := range []byte(string(r)) {
+			if size > 1 {
+				// If size > 1, r is not ascii. Always do percent encoding.
+				buf.WriteString(fmt.Sprintf("%%%02X", b))
+				continue
+			}
+
+			// The for loop is necessary even if size == 1. r could be
+			// utf8.RuneError.
+			//
+			// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
+			if b >= spaceByte && b <= tildeByte && b != percentByte {
+				buf.WriteByte(b)
+			} else {
+				buf.WriteString(fmt.Sprintf("%%%02X", b))
+			}
 		}
+		msg = msg[size:]
 	}
 	return buf.String()
 }
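
A minimal stand-in for the encoding behaviour above, showing how non-ASCII runes are percent-encoded byte by byte and how an invalid UTF-8 byte ends up as the bytes of the replacement character; percentEncode is illustrative, not the unexported encodeGrpcMessage.

package main

import (
	"bytes"
	"fmt"
	"unicode/utf8"
)

// percentEncode passes printable ASCII (except '%') through unchanged and
// writes every other byte, including each byte of a multi-byte rune, as %XX.
// Invalid UTF-8 decodes to utf8.RuneError, so it is emitted as %EF%BF%BD.
func percentEncode(msg string) string {
	var buf bytes.Buffer
	for len(msg) > 0 {
		r, size := utf8.DecodeRuneInString(msg)
		for _, b := range []byte(string(r)) {
			if size == 1 && b >= ' ' && b <= '~' && b != '%' {
				buf.WriteByte(b)
			} else {
				fmt.Fprintf(&buf, "%%%02X", b)
			}
		}
		msg = msg[size:]
	}
	return buf.String()
}

func main() {
	fmt.Println(percentEncode("rpc error: café")) // rpc error: caf%C3%A9
	fmt.Println(percentEncode("bad\xffbyte"))     // bad%EF%BF%BDbyte
}
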
@@ -531,6 +619,9 @@
 	if w.err != nil {
 		return 0, w.err
 	}
+	if w.batchSize == 0 { // Buffer has been disabled.
+		return w.conn.Write(b)
+	}
 	for len(b) > 0 {
 		nn := copy(w.buf[w.offset:], b)
 		b = b[nn:]
@@ -563,8 +654,14 @@
 	fr     *http2.Framer
 }
 
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
-	r := bufio.NewReaderSize(conn, readBufferSize)
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
+	if writeBufferSize < 0 {
+		writeBufferSize = 0
+	}
+	var r io.Reader = conn
+	if readBufferSize > 0 {
+		r = bufio.NewReaderSize(r, readBufferSize)
+	}
 	w := newBufWriter(conn, writeBufferSize)
 	f := &framer{
 		writer: w,
@@ -573,6 +670,7 @@
 	// Opt-in to Frame reuse API on framer to reduce garbage.
 	// Frames aren't safe to read from after a subsequent call to ReadFrame.
 	f.fr.SetReuseFrames()
+	f.fr.MaxHeaderListSize = maxHeaderListSize
 	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
 	return f
 }
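
A simplified sketch of the buffering behaviour introduced above: a non-positive write buffer size disables batching, so writes go straight to the underlying connection. The bufWriter here is a cut-down stand-in, not the transport's real type.

package main

import (
	"bytes"
	"fmt"
	"io"
)

type bufWriter struct {
	buf       []byte
	offset    int
	batchSize int
	out       io.Writer
}

func newBufWriter(out io.Writer, batchSize int) *bufWriter {
	if batchSize < 0 {
		batchSize = 0
	}
	return &bufWriter{buf: make([]byte, batchSize*2), batchSize: batchSize, out: out}
}

func (w *bufWriter) Write(b []byte) (int, error) {
	if w.batchSize == 0 { // buffering disabled: write through
		return w.out.Write(b)
	}
	written := 0
	for len(b) > 0 {
		nn := copy(w.buf[w.offset:], b)
		b = b[nn:]
		w.offset += nn
		written += nn
		if w.offset >= w.batchSize {
			if err := w.flush(); err != nil {
				return written, err
			}
		}
	}
	return written, nil
}

func (w *bufWriter) flush() error {
	if w.offset == 0 {
		return nil
	}
	_, err := w.out.Write(w.buf[:w.offset])
	w.offset = 0
	return err
}

func main() {
	var sink bytes.Buffer
	w := newBufWriter(&sink, 0) // unbuffered: bytes reach the sink immediately
	w.Write([]byte("hello"))
	fmt.Println(sink.String()) // hello
}
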
diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
similarity index 90%
rename from vendor/google.golang.org/grpc/transport/log.go
rename to vendor/google.golang.org/grpc/internal/transport/log.go
index ac8e358..879df80 100644
--- a/vendor/google.golang.org/grpc/transport/log.go
+++ b/vendor/google.golang.org/grpc/internal/transport/log.go
@@ -42,9 +42,3 @@
 		grpclog.Errorf(format, args...)
 	}
 }
-
-func fatalf(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Fatalf(format, args...)
-	}
-}
diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
similarity index 80%
rename from vendor/google.golang.org/grpc/transport/transport.go
rename to vendor/google.golang.org/grpc/internal/transport/transport.go
index f51f878..7f82cbb 100644
--- a/vendor/google.golang.org/grpc/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -19,9 +19,10 @@
 // Package transport defines and implements message oriented communication
 // channel to complete various transactions (e.g., an RPC).  It is meant for
 // grpc-internal usage and is not intended to be imported directly by users.
-package transport // externally used as import "google.golang.org/grpc/transport"
+package transport
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -29,7 +30,6 @@
 	"sync"
 	"sync/atomic"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/keepalive"
@@ -110,15 +110,15 @@
 	return b.c
 }
 
-//
 // recvBufferReader implements io.Reader interface to read the data from
 // recvBuffer.
 type recvBufferReader struct {
-	ctx     context.Context
-	ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
-	recv    *recvBuffer
-	last    []byte // Stores the remaining data in the previous calls.
-	err     error
+	closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
+	ctx         context.Context
+	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
+	recv        *recvBuffer
+	last        []byte // Stores the remaining data in the previous calls.
+	err         error
 }
 
 // Read reads the next len(p) bytes from last. If last is drained, it tries to
@@ -128,31 +128,53 @@
 	if r.err != nil {
 		return 0, r.err
 	}
-	n, r.err = r.read(p)
-	return n, r.err
-}
-
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
 	if r.last != nil && len(r.last) > 0 {
 		// Read remaining data left in last call.
 		copied := copy(p, r.last)
 		r.last = r.last[copied:]
 		return copied, nil
 	}
+	if r.closeStream != nil {
+		n, r.err = r.readClient(p)
+	} else {
+		n, r.err = r.read(p)
+	}
+	return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
 	select {
 	case <-r.ctxDone:
 		return 0, ContextErr(r.ctx.Err())
 	case m := <-r.recv.get():
-		r.recv.load()
-		if m.err != nil {
-			return 0, m.err
-		}
-		copied := copy(p, m.data)
-		r.last = m.data[copied:]
-		return copied, nil
+		return r.readAdditional(m, p)
 	}
 }
 
+func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+	// If the context is canceled, close the stream with nil trailer metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readAdditional(m, p)
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+	r.recv.load()
+	if m.err != nil {
+		return 0, m.err
+	}
+	copied := copy(p, m.data)
+	r.last = m.data[copied:]
+	return copied, nil
+}
+
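A self-contained sketch of the client-side cancellation pattern above: closeStream injects the error into the receive buffer, and the reader then drains that message so the error surfaces through the normal read path. The channel and recvMsg here are simplified stand-ins for the transport's types.

package main

import (
	"context"
	"fmt"
)

type recvMsg struct {
	data []byte
	err  error
}

func main() {
	recv := make(chan recvMsg, 1)
	closeStream := func(err error) {
		// In the real transport this also resets the HTTP/2 stream; here we
		// only deliver the error to the receive buffer.
		recv <- recvMsg{err: err}
	}

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the caller canceling the RPC

	var m recvMsg
	select {
	case <-ctx.Done():
		closeStream(ctx.Err())
		m = <-recv // drain the injected error, mirroring readClient
	case m = <-recv:
	}
	fmt.Println("read result:", m.err) // context canceled
}
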
 type streamState uint32
 
 const (
@@ -176,7 +198,6 @@
 	buf          *recvBuffer
 	trReader     io.Reader
 	fc           *inFlow
-	recvQuota    uint32
 	wq           *writeQuota
 
 	// Callback to state application's intentions to read data. This
@@ -187,10 +208,16 @@
 	headerDone uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
 
 	// hdrMu protects header and trailer metadata on the server-side.
-	hdrMu   sync.Mutex
-	header  metadata.MD // the received header metadata.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
 	trailer metadata.MD // the key-value map of trailer metadata.
 
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
 	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
 	headerSent uint32
 
@@ -259,16 +286,25 @@
 	s.sendCompress = str
 }
 
-// Done returns a chanel which is closed when it receives the final status
+// Done returns a channel which is closed when it receives the final status
 // from the server.
 func (s *Stream) Done() <-chan struct{} {
 	return s.done
 }
 
-// Header acquires the key-value pairs of header metadata once it
-// is available. It blocks until i) the metadata is ready or ii) there is no
-// header metadata or iii) the stream is canceled/expired.
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called.
 func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil && s.header != nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
 	err := s.waitOnHeader()
 	// Even if the stream is closed, header is returned if available.
 	select {
@@ -282,6 +318,18 @@
 	return nil, err
 }
 
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only.  If the stream ends
+// before headers are received, returns true, nil.  If a context error happens
+// first, returns it as a status error.  Client-side only.
+func (s *Stream) TrailersOnly() (bool, error) {
+	err := s.waitOnHeader()
+	if err != nil {
+		return false, err
+	}
+	return s.noHeaders, nil
+}
+
 // Trailer returns the cached trailer metadata. Note that if it is not called
 // after the entire stream is done, it could return an empty MD. Client
 // side only.
@@ -292,12 +340,6 @@
 	return c
 }
 
-// ServerTransport returns the underlying ServerTransport for the stream.
-// The client side stream always returns nil.
-func (s *Stream) ServerTransport() ServerTransport {
-	return s.st
-}
-
 // ContentSubtype returns the content-subtype for a request. For example, a
 // content-subtype of "proto" will result in a content-type of
 // "application/grpc+proto". This will always be lowercase.  See
@@ -319,7 +361,7 @@
 
 // Status returns the status received from the server.
 // Status can be read safely only after the stream has ended,
-// that is, read or write has returned io.EOF.
+// that is, after Done() is closed.
 func (s *Stream) Status() *status.Status {
 	return s.status
 }
@@ -344,8 +386,7 @@
 // combined with any metadata set by previous calls to SetHeader and
 // then written to the transport stream.
 func (s *Stream) SendHeader(md metadata.MD) error {
-	t := s.ServerTransport()
-	return t.WriteHeader(s, md)
+	return s.st.WriteHeader(s, md)
 }
 
 // SetTrailer sets the trailer metadata which will be sent with the RPC status
@@ -439,6 +480,7 @@
 	WriteBufferSize       int
 	ReadBufferSize        int
 	ChannelzParentID      int64
+	MaxHeaderListSize     *uint32
 }
 
 // NewServerTransport creates a ServerTransport with conn or non-nil error
@@ -451,17 +493,18 @@
 type ConnectOptions struct {
 	// UserAgent is the application user agent.
 	UserAgent string
-	// Authority is the :authority pseudo-header to use. This field has no effect if
-	// TransportCredentials is set.
-	Authority string
 	// Dialer specifies how to dial a network address.
 	Dialer func(context.Context, string) (net.Conn, error)
 	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
 	FailOnNonTempDialError bool
 	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
 	PerRPCCredentials []credentials.PerRPCCredentials
-	// TransportCredentials stores the Authenticator required to setup a client connection.
+	// TransportCredentials stores the Authenticator required to setup a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
 	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used. Only one of
+	// TransportCredentials and CredsBundle is non-nil.
+	CredsBundle credentials.Bundle
 	// KeepaliveParams stores the keepalive parameters.
 	KeepaliveParams keepalive.ClientParameters
 	// StatsHandler stores the handler for stats.
@@ -476,6 +519,8 @@
 	ReadBufferSize int
 	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
 	ChannelzParentID int64
+	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
+	MaxHeaderListSize *uint32
 }
 
 // TargetInfo contains the information of the target such as network address and metadata.
@@ -487,8 +532,8 @@
 
 // NewClientTransport establishes the transport with the required ConnectOptions
 // and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
-	return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
 }
 
 // Options provides additional hints and information for message
@@ -497,11 +542,6 @@
 	// Last indicates whether this write is the last piece for
 	// this stream.
 	Last bool
-
-	// Delay is a hint to the transport implementation for whether
-	// the data could be buffered for a batching write. The
-	// transport implementation may ignore the hint.
-	Delay bool
 }
 
 // CallHdr carries the information of a particular RPC.
@@ -519,14 +559,6 @@
 	// Creds specifies credentials.PerRPCCredentials for a call.
 	Creds credentials.PerRPCCredentials
 
-	// Flush indicates whether a new stream command should be sent
-	// to the peer without waiting for the first data. This is
-	// only a hint.
-	// If it's true, the transport may modify the flush decision
-	// for performance purposes.
-	// If it's false, new stream will never be flushed.
-	Flush bool
-
 	// ContentSubtype specifies the content-subtype for a request. For example, a
 	// content-subtype of "proto" will result in a content-type of
 	// "application/grpc+proto". The value of ContentSubtype must be all
@@ -534,6 +566,8 @@
 	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
 	// for more details.
 	ContentSubtype string
+
+	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
 }
 
 // ClientTransport is the common interface for all gRPC client-side transport
@@ -576,6 +610,9 @@
 	// GetGoAwayReason returns the reason why GoAway frame was received.
 	GetGoAwayReason() GoAwayReason
 
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
 	// IncrMsgSent increments the number of message sent through this transport.
 	IncrMsgSent()
 
@@ -622,14 +659,6 @@
 	IncrMsgRecv()
 }
 
-// streamErrorf creates an StreamError with the specified error code and description.
-func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
-	return StreamError{
-		Code: c,
-		Desc: fmt.Sprintf(format, a...),
-	}
-}
-
 // connectionErrorf creates an ConnectionError with the specified error description.
 func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
 	return ConnectionError{
@@ -672,7 +701,7 @@
 	// errStreamDrain indicates that the stream is rejected because the
 	// connection is draining. This could be caused by goaway or balancer
 	// removing the address.
-	errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining")
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
 	// errStreamDone is returned from write at the client side to indicate an
 	// error to the application layer.
 	errStreamDone = errors.New("the stream is done")
@@ -681,18 +710,6 @@
 	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
 )
 
-// TODO: See if we can replace StreamError with status package errors.
-
-// StreamError is an error that only affects one stream within a connection.
-type StreamError struct {
-	Code codes.Code
-	Desc string
-}
-
-func (e StreamError) Error() string {
-	return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
-}
-
 // GoAwayReason contains the reason for the GoAway frame received.
 type GoAwayReason uint8
 
@@ -706,3 +723,38 @@
 	// "too_many_pings".
 	GoAwayTooManyPings GoAwayReason = 2
 )
+
+// channelzData is used to store channelz related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), because atomic
+// operations on an int64 variable on a 32-bit machine require the caller to enforce 64-bit alignment.
+// Here, by grouping those int64 fields inside a struct, we are enforcing that alignment.
+type channelzData struct {
+	kpCount int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// Client side: The number of streams that have ended successfully by receiving
+	// EoS bit set frame from server.
+	// Server side: The number of streams that have ended successfully by sending
+	// frame with EoS bit set.
+	streamsSucceeded int64
+	streamsFailed    int64
+	// lastStreamCreatedTime stores the timestamp at which the last stream was created. It is of int64 type
+	// instead of time.Time since it's more costly to atomically update a time.Time variable than an int64
+	// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
+	lastStreamCreatedTime int64
+	msgSent               int64
+	msgRecv               int64
+	lastMsgSentTime       int64
+	lastMsgRecvTime       int64
+}
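
The comment above refers to the sync/atomic alignment rule: on 32-bit platforms, 64-bit atomic operations require 64-bit-aligned operands, and only the first word of a separately allocated struct is guaranteed to be so aligned. A minimal sketch of the pattern, with a hypothetical counters type standing in for channelzData:

package main

import (
	"fmt"
	"sync/atomic"
)

// counters groups all int64 fields that are updated atomically. Because the
// struct is allocated on its own (via new), its first field is 64-bit aligned
// even on 32-bit platforms, and since every field is an int64 the rest are too.
type counters struct {
	streamsStarted   int64
	streamsSucceeded int64
	streamsFailed    int64
}

type transportStats struct {
	// Other, non-atomic fields can live here without affecting the alignment
	// of the separately allocated counters struct.
	name   string
	czData *counters
}

func main() {
	t := &transportStats{name: "client", czData: new(counters)}
	atomic.AddInt64(&t.czData.streamsStarted, 1)
	fmt.Println(atomic.LoadInt64(&t.czData.streamsStarted)) // 1
}
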
+
+// ContextErr converts the error from context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
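
Since ContextErr lives in an internal package, the sketch below inlines the same mapping using only the public codes and status packages, to show how a context error surfaces to callers as a status error.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatusErr mirrors ContextErr: context errors become DeadlineExceeded or
// Canceled status errors; anything else is reported as Internal.
func toStatusErr(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	}
	return status.Errorf(codes.Internal, "unexpected error from context package: %v", err)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done()
	st, _ := status.FromError(toStatusErr(ctx.Err()))
	fmt.Println(st.Code()) // DeadlineExceeded
}
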
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index f8adc7e..34d31b5 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -16,7 +16,8 @@
  *
  */
 
-// Package keepalive defines configurable parameters for point-to-point healthcheck.
+// Package keepalive defines configurable parameters for point-to-point
+// healthcheck.
 package keepalive
 
 import (
@@ -24,42 +25,61 @@
 )
 
 // ClientParameters is used to set keepalive parameters on the client-side.
-// These configure how the client will actively probe to notice when a connection is broken
-// and send pings so intermediaries will be aware of the liveness of the connection.
-// Make sure these parameters are set in coordination with the keepalive policy on the server,
-// as incompatible settings can result in closing of connection.
+// These configure how the client will actively probe to notice when a
+// connection is broken and send pings so intermediaries will be aware of the
+// liveness of the connection. Make sure these parameters are set in
+// coordination with the keepalive policy on the server, as incompatible
+// settings can result in closing of connection.
 type ClientParameters struct {
-	// After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive.
+	// After a duration of this time, if the client doesn't see any activity, it
+	// pings the server to see if the transport is still alive.
+	// If set below 10s, a minimum value of 10s will be used instead.
 	Time time.Duration // The current default value is infinity.
-	// After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that
-	// the connection is closed.
+	// After having pinged for keepalive check, the client waits for a duration
+	// of Timeout and if no activity is seen even after that the connection is
+	// closed.
 	Timeout time.Duration // The current default value is 20 seconds.
-	// If true, client runs keepalive checks even with no active RPCs.
+	// If true, client sends keepalive pings even with no active RPCs. If false,
+	// when there are no active RPCs, Time and Timeout will be ignored and no
+	// keepalive pings will be sent.
 	PermitWithoutStream bool // false by default.
 }
 
-// ServerParameters is used to set keepalive and max-age parameters on the server-side.
+// ServerParameters is used to set keepalive and max-age parameters on the
+// server-side.
 type ServerParameters struct {
-	// MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
-	// Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment.
+	// MaxConnectionIdle is a duration for the amount of time after which an
+	// idle connection would be closed by sending a GoAway. Idleness duration is
+	// defined since the most recent time the number of outstanding RPCs became
+	// zero or the connection establishment.
 	MaxConnectionIdle time.Duration // The current default value is infinity.
-	// MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
-	// A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
+	// MaxConnectionAge is a duration for the maximum amount of time a
+	// connection may exist before it will be closed by sending a GoAway. A
+	// random jitter of +/-10% will be added to MaxConnectionAge to spread out
+	// connection storms.
 	MaxConnectionAge time.Duration // The current default value is infinity.
-	// MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
+	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+	// which the connection will be forcibly closed.
 	MaxConnectionAgeGrace time.Duration // The current default value is infinity.
-	// After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive.
+	// After a duration of this time, if the server doesn't see any activity, it
+	// pings the client to see if the transport is still alive.
+	// If set below 1s, a minimum value of 1s will be used instead.
 	Time time.Duration // The current default value is 2 hours.
-	// After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that
-	// the connection is closed.
+	// After having pinged for keepalive check, the server waits for a duration
+	// of Timeout and if no activity is seen even after that the connection is
+	// closed.
 	Timeout time.Duration // The current default value is 20 seconds.
 }
 
-// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
-// Server will close connection with a client that violates this policy.
+// EnforcementPolicy is used to set keepalive enforcement policy on the
+// server-side. Server will close connection with a client that violates this
+// policy.
 type EnforcementPolicy struct {
-	// MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
+	// MinTime is the minimum amount of time a client should wait before sending
+	// a keepalive ping.
 	MinTime time.Duration // The current default value is 5 minutes.
-	// If true, server expects keepalive pings even when there are no active streams(RPCs).
+	// If true, the server allows keepalive pings even when there are no active
+	// streams (RPCs). If false, and the client sends pings when there are no
+	// active streams, the server will send a GOAWAY and close the connection.
 	PermitWithoutStream bool // false by default.
 }
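
These parameters are normally wired in through the public keepalive package and the corresponding grpc options; a hedged sketch of typical usage (addresses, ports, and durations are invented for illustration):

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Client side: ping after 30s of inactivity, give up after 10s without a
	// reply. A Time below 10s would be raised to the 10s minimum noted above.
	conn, err := grpc.Dial("example.com:443",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: true,
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Server side: close idle connections after 5 minutes and allow pings no
	// more often than every 20s, even without active RPCs.
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 5 * time.Minute,
			Time:              2 * time.Hour,
			Timeout:           20 * time.Second,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             20 * time.Second,
			PermitWithoutStream: true,
		}),
	)
	_ = srv.Serve(lis)
}
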
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index bd2eaf4..cf6d1b9 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -22,10 +22,9 @@
 package metadata // import "google.golang.org/grpc/metadata"
 
 import (
+	"context"
 	"fmt"
 	"strings"
-
-	"golang.org/x/net/context"
 )
 
 // DecodeKeyValue returns k, v, nil.
diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go
index 0f8a908..c9f79dc 100644
--- a/vendor/google.golang.org/grpc/naming/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go
@@ -19,13 +19,13 @@
 package naming
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"net"
 	"strconv"
 	"time"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/grpclog"
 )
 
@@ -37,6 +37,9 @@
 var (
 	errMissingAddr  = errors.New("missing address")
 	errWatcherClose = errors.New("watcher has been closed")
+
+	lookupHost = net.DefaultResolver.LookupHost
+	lookupSRV  = net.DefaultResolver.LookupSRV
 )
 
 // NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
@@ -73,8 +76,8 @@
 
 // parseTarget takes the user input target string, returns formatted host and port info.
 // If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets
-// are strippd when setting the host.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
 // examples:
 // target: "www.google.com" returns host: "www.google.com", port: "443"
 // target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
@@ -218,7 +221,7 @@
 	for _, s := range srvs {
 		lbAddrs, err := lookupHost(w.ctx, s.Target)
 		if err != nil {
-			grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err)
+			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
 			continue
 		}
 		for _, a := range lbAddrs {
diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go
deleted file mode 100644
index 57b65d7..0000000
--- a/vendor/google.golang.org/grpc/naming/go17.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build go1.6,!go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
-	"net"
-
-	"golang.org/x/net/context"
-)
-
-var (
-	lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
-	lookupSRV  = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
-		return net.LookupSRV(service, proto, name)
-	}
-)
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
index 8cc39e9..c99fdbe 100644
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ b/vendor/google.golang.org/grpc/naming/naming.go
@@ -17,7 +17,7 @@
  */
 
 // Package naming defines the naming API and related data structures for gRPC.
-// The interface is EXPERIMENTAL and may be suject to change.
+// The interface is EXPERIMENTAL and may be subject to change.
 //
 // Deprecated: please use package resolver.
 package naming
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index 317b8b9..e01d219 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -21,9 +21,9 @@
 package peer
 
 import (
+	"context"
 	"net"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/credentials"
 )
 
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 0a984e6..f962549 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -19,19 +19,16 @@
 package grpc
 
 import (
+	"context"
 	"io"
 	"sync"
-	"sync/atomic"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
 )
 
 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
@@ -45,16 +42,10 @@
 	// The latest connection happened.
 	connErrMu sync.Mutex
 	connErr   error
-
-	stickinessMDKey atomic.Value
-	stickiness      *stickyStore
 }
 
 func newPickerWrapper() *pickerWrapper {
-	bp := &pickerWrapper{
-		blockingCh: make(chan struct{}),
-		stickiness: newStickyStore(),
-	}
+	bp := &pickerWrapper{blockingCh: make(chan struct{})}
 	return bp
 }
 
@@ -71,27 +62,6 @@
 	return err
 }
 
-func (bp *pickerWrapper) updateStickinessMDKey(newKey string) {
-	// No need to check ok because mdKey == "" if ok == false.
-	if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey {
-		bp.stickinessMDKey.Store(newKey)
-		bp.stickiness.reset(newKey)
-	}
-}
-
-func (bp *pickerWrapper) getStickinessMDKey() string {
-	// No need to check ok because mdKey == "" if ok == false.
-	mdKey, _ := bp.stickinessMDKey.Load().(string)
-	return mdKey
-}
-
-func (bp *pickerWrapper) clearStickinessState() {
-	if oldKey := bp.getStickinessMDKey(); oldKey != "" {
-		// There's no need to reset store if mdKey was "".
-		bp.stickiness.reset(oldKey)
-	}
-}
-
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 	bp.mu.Lock()
@@ -131,31 +101,7 @@
 // - the subConn returned by the current picker is not READY
 // When one of these situations happens, pick blocks until the picker gets updated.
 func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-
-	mdKey := bp.getStickinessMDKey()
-	stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey)
-
-	// Potential race here: if stickinessMDKey is updated after the above two
-	// lines, and this pick is a sticky pick, the following put could add an
-	// entry to sticky store with an outdated sticky key.
-	//
-	// The solution: keep the current md key in sticky store, and at the
-	// beginning of each get/put, check the mdkey against store.curMDKey.
-	//  - Cons: one more string comparing for each get/put.
-	//  - Pros: the string matching happens inside get/put, so the overhead for
-	//  non-sticky RPCs will be minimal.
-
-	if isSticky {
-		if t, ok := bp.stickiness.get(mdKey, stickyKey); ok {
-			// Done function returned is always nil.
-			return t, nil, nil
-		}
-	}
-
-	var (
-		p  balancer.Picker
-		ch chan struct{}
-	)
+	var ch chan struct{}
 
 	for {
 		bp.mu.Lock()
@@ -181,7 +127,7 @@
 		}
 
 		ch = bp.blockingCh
-		p = bp.picker
+		p := bp.picker
 		bp.mu.Unlock()
 
 		subConn, done, err := p.Pick(ctx, opts)
@@ -195,26 +141,35 @@
 					continue
 				}
 				return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
+			case context.DeadlineExceeded:
+				return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
+			case context.Canceled:
+				return nil, nil, status.Error(codes.Canceled, err.Error())
 			default:
+				if _, ok := status.FromError(err); ok {
+					return nil, nil, err
+				}
 				// err is some other error.
-				return nil, nil, toRPCErr(err)
+				return nil, nil, status.Error(codes.Unknown, err.Error())
 			}
 		}
 
 		acw, ok := subConn.(*acBalancerWrapper)
 		if !ok {
-			grpclog.Infof("subconn returned from pick is not *acBalancerWrapper")
+			grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
 			continue
 		}
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
-			if isSticky {
-				bp.stickiness.put(mdKey, stickyKey, acw)
-			}
 			if channelz.IsOn() {
 				return t, doneChannelzWrapper(acw, done), nil
 			}
 			return t, done, nil
 		}
+		if done != nil {
+			// Calling done with nil error, no bytes sent and no bytes received.
+			// DoneInfo with default value works.
+			done(balancer.DoneInfo{})
+		}
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		// If ok == false, ac.state is not READY.
 		// A valid picker always returns READY subConn. This means the state of ac
@@ -232,100 +187,3 @@
 	bp.done = true
 	close(bp.blockingCh)
 }
-
-type stickyStoreEntry struct {
-	acw  *acBalancerWrapper
-	addr resolver.Address
-}
-
-type stickyStore struct {
-	mu sync.Mutex
-	// curMDKey is check before every get/put to avoid races. The operation will
-	// abort immediately when the given mdKey is different from the curMDKey.
-	curMDKey string
-	store    map[string]*stickyStoreEntry
-}
-
-func newStickyStore() *stickyStore {
-	return &stickyStore{
-		store: make(map[string]*stickyStoreEntry),
-	}
-}
-
-// reset clears the map in stickyStore, and set the currentMDKey to newMDKey.
-func (ss *stickyStore) reset(newMDKey string) {
-	ss.mu.Lock()
-	ss.curMDKey = newMDKey
-	ss.store = make(map[string]*stickyStoreEntry)
-	ss.mu.Unlock()
-}
-
-// stickyKey is the key to look up in store. mdKey will be checked against
-// curMDKey to avoid races.
-func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	if mdKey != ss.curMDKey {
-		return
-	}
-	// TODO(stickiness): limit the total number of entries.
-	ss.store[stickyKey] = &stickyStoreEntry{
-		acw:  acw,
-		addr: acw.getAddrConn().getCurAddr(),
-	}
-}
-
-// stickyKey is the key to look up in store. mdKey will be checked against
-// curMDKey to avoid races.
-func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) {
-	ss.mu.Lock()
-	defer ss.mu.Unlock()
-	if mdKey != ss.curMDKey {
-		return nil, false
-	}
-	entry, ok := ss.store[stickyKey]
-	if !ok {
-		return nil, false
-	}
-	ac := entry.acw.getAddrConn()
-	if ac.getCurAddr() != entry.addr {
-		delete(ss.store, stickyKey)
-		return nil, false
-	}
-	t, ok := ac.getReadyTransport()
-	if !ok {
-		delete(ss.store, stickyKey)
-		return nil, false
-	}
-	return t, true
-}
-
-// Get one value from metadata in ctx with key stickinessMDKey.
-//
-// It returns "", false if stickinessMDKey is an empty string.
-func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) {
-	if stickinessMDKey == "" {
-		return "", false
-	}
-
-	md, added, ok := metadata.FromOutgoingContextRaw(ctx)
-	if !ok {
-		return "", false
-	}
-
-	if vv, ok := md[stickinessMDKey]; ok {
-		if len(vv) > 0 {
-			return vv[0], true
-		}
-	}
-
-	for _, ss := range added {
-		for i := 0; i < len(ss)-1; i += 2 {
-			if ss[i] == stickinessMDKey {
-				return ss[i+1], true
-			}
-		}
-	}
-
-	return "", false
-}
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index bf659d4..d1e38aa 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -19,7 +19,8 @@
 package grpc
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
@@ -56,6 +57,7 @@
 	if b.sc == nil {
 		b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
 		if err != nil {
+			//TODO(yuxuanli): why not change the cc state to Idle?
 			grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
 			return
 		}
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go
index 2d40236..f8f69bf 100644
--- a/vendor/google.golang.org/grpc/proxy.go
+++ b/vendor/google.golang.org/grpc/proxy.go
@@ -20,6 +20,8 @@
 
 import (
 	"bufio"
+	"context"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
@@ -27,10 +29,10 @@
 	"net/http"
 	"net/http/httputil"
 	"net/url"
-
-	"golang.org/x/net/context"
 )
 
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
 var (
 	// errDisabled indicates that proxy is disabled for the address.
 	errDisabled = errors.New("proxy is disabled for the address")
@@ -38,7 +40,7 @@
 	httpProxyFromEnvironment = http.ProxyFromEnvironment
 )
 
-func mapAddress(ctx context.Context, address string) (string, error) {
+func mapAddress(ctx context.Context, address string) (*url.URL, error) {
 	req := &http.Request{
 		URL: &url.URL{
 			Scheme: "https",
@@ -47,12 +49,12 @@
 	}
 	url, err := httpProxyFromEnvironment(req)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 	if url == nil {
-		return "", errDisabled
+		return nil, errDisabled
 	}
-	return url.Host, nil
+	return url, nil
 }
 
 // To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
@@ -69,18 +71,28 @@
 	return c.r.Read(b)
 }
 
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
+func basicAuth(username, password string) string {
+	auth := username + ":" + password
+	return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
 	defer func() {
 		if err != nil {
 			conn.Close()
 		}
 	}()
 
-	req := (&http.Request{
+	req := &http.Request{
 		Method: http.MethodConnect,
-		URL:    &url.URL{Host: addr},
+		URL:    &url.URL{Host: backendAddr},
 		Header: map[string][]string{"User-Agent": {grpcUA}},
-	})
+	}
+	if t := proxyURL.User; t != nil {
+		u := t.Username()
+		p, _ := t.Password()
+		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+	}
 
 	if err := sendHTTPRequest(ctx, req, conn); err != nil {
 		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
@@ -108,23 +120,33 @@
 // provided dialer, does HTTP CONNECT handshake and returns the connection.
 func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
 	return func(ctx context.Context, addr string) (conn net.Conn, err error) {
-		var skipHandshake bool
-		newAddr, err := mapAddress(ctx, addr)
+		var newAddr string
+		proxyURL, err := mapAddress(ctx, addr)
 		if err != nil {
 			if err != errDisabled {
 				return nil, err
 			}
-			skipHandshake = true
 			newAddr = addr
+		} else {
+			newAddr = proxyURL.Host
 		}
 
 		conn, err = dialer(ctx, newAddr)
 		if err != nil {
 			return
 		}
-		if !skipHandshake {
-			conn, err = doHTTPConnectHandshake(ctx, conn, addr)
+		if proxyURL != nil {
+			// proxy is disabled if proxyURL is nil.
+			conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
 		}
 		return
 	}
 }
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
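
A standalone sketch of the Proxy-Authorization handling added above: when the proxy URL taken from the environment carries userinfo, the CONNECT request gets a Basic auth header. The proxy URL, backend address, and user agent below are made up.

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// e.g. HTTPS_PROXY=http://alice:s3cret@proxy.internal:3128
	proxyURL, _ := url.Parse("http://alice:s3cret@proxy.internal:3128")

	req := &http.Request{
		Method: http.MethodConnect,
		URL:    &url.URL{Host: "backend.example.com:443"},
		Header: map[string][]string{"User-Agent": {"grpc-go/1.20.1"}},
	}
	if u := proxyURL.User; u != nil {
		pass, _ := u.Password()
		auth := base64.StdEncoding.EncodeToString([]byte(u.Username() + ":" + pass))
		req.Header.Add("Proxy-Authorization", "Basic "+auth)
	}
	fmt.Println(req.Header.Get("Proxy-Authorization")) // Basic YWxpY2U6czNjcmV0
}
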
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
index c1cabfc..5835599 100644
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2017 gRPC authors.
+ * Copyright 2018 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@
 package dns
 
 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
-	"math/rand"
 	"net"
 	"os"
 	"strconv"
@@ -32,8 +32,9 @@
 	"sync"
 	"time"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -42,32 +43,65 @@
 }
 
 const (
-	defaultPort = "443"
-	defaultFreq = time.Minute * 30
-	golang      = "GO"
+	defaultPort       = "443"
+	defaultFreq       = time.Minute * 30
+	defaultDNSSvrPort = "53"
+	golang            = "GO"
+	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+	txtPrefix = "_grpc_config."
 	// In DNS, service config is encoded in a TXT record via the mechanism
 	// described in RFC-1464 using the attribute name grpc_config.
 	txtAttribute = "grpc_config="
 )
 
 var (
-	errMissingAddr = errors.New("missing address")
-	randomGen      = rand.New(rand.NewSource(time.Now().UnixNano()))
+	errMissingAddr = errors.New("dns resolver: missing address")
+
+	// Addresses ending with a colon that is supposed to be the separator
+	// between host and port are not allowed.  E.g. "::" is a valid address as
+	// it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
+	// a colon as the host and port separator.
+	errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
 )
 
+var (
+	defaultResolver netResolver = net.DefaultResolver
+)
+
+var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
+	return func(ctx context.Context, network, address string) (net.Conn, error) {
+		var dialer net.Dialer
+		return dialer.DialContext(ctx, network, authority)
+	}
+}
+
+var customAuthorityResolver = func(authority string) (netResolver, error) {
+	host, port, err := parseTarget(authority, defaultDNSSvrPort)
+	if err != nil {
+		return nil, err
+	}
+
+	authorityWithPort := net.JoinHostPort(host, port)
+
+	return &net.Resolver{
+		PreferGo: true,
+		Dial:     customAuthorityDialler(authorityWithPort),
+	}, nil
+}
+
 // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
 func NewBuilder() resolver.Builder {
-	return &dnsBuilder{freq: defaultFreq}
+	return &dnsBuilder{minFreq: defaultFreq}
 }
 
 type dnsBuilder struct {
-	// frequency of polling the DNS server.
-	freq time.Duration
+	// minimum frequency of polling the DNS server.
+	minFreq time.Duration
 }
 
 // Build creates and starts a DNS resolver that watches the name resolution of the target.
 func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
-	host, port, err := parseTarget(target.Endpoint)
+	host, port, err := parseTarget(target.Endpoint, defaultPort)
 	if err != nil {
 		return nil, err
 	}
@@ -90,7 +124,8 @@
 	// DNS address (non-IP).
 	ctx, cancel := context.WithCancel(context.Background())
 	d := &dnsResolver{
-		freq:                 b.freq,
+		freq:                 b.minFreq,
+		backoff:              backoff.Exponential{MaxDelay: b.minFreq},
 		host:                 host,
 		port:                 port,
 		ctx:                  ctx,
@@ -101,6 +136,15 @@
 		disableServiceConfig: opts.DisableServiceConfig,
 	}
 
+	if target.Authority == "" {
+		d.resolver = defaultResolver
+	} else {
+		d.resolver, err = customAuthorityResolver(target.Authority)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	d.wg.Add(1)
 	go d.watcher()
 	return d, nil
@@ -111,6 +155,12 @@
 	return "dns"
 }
 
+type netResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
 // ipResolver watches for the name resolution update for an IP address.
 type ipResolver struct {
 	cc resolver.ClientConn
@@ -146,12 +196,15 @@
 
 // dnsResolver watches for the name resolution update for a non-IP target.
 type dnsResolver struct {
-	freq   time.Duration
-	host   string
-	port   string
-	ctx    context.Context
-	cancel context.CancelFunc
-	cc     resolver.ClientConn
+	freq       time.Duration
+	backoff    backoff.Exponential
+	retryCount int
+	host       string
+	port       string
+	resolver   netResolver
+	ctx        context.Context
+	cancel     context.CancelFunc
+	cc         resolver.ClientConn
 	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
 	rn chan struct{}
 	t  *time.Timer
@@ -190,8 +243,15 @@
 		case <-d.rn:
 		}
 		result, sc := d.lookup()
-		// Next lookup should happen after an interval defined by d.freq.
-		d.t.Reset(d.freq)
+		// Next lookup should happen within an interval defined by d.freq. It may
+		// happen more often due to exponential retry on an empty address list.
+		if len(result) == 0 {
+			d.retryCount++
+			d.t.Reset(d.backoff.Backoff(d.retryCount))
+		} else {
+			d.retryCount = 0
+			d.t.Reset(d.freq)
+		}
 		d.cc.NewServiceConfig(sc)
 		d.cc.NewAddress(result)
 	}
@@ -199,13 +259,13 @@
 
 func (d *dnsResolver) lookupSRV() []resolver.Address {
 	var newAddrs []resolver.Address
-	_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
+	_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
 	if err != nil {
 		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
 		return nil
 	}
 	for _, s := range srvs {
-		lbAddrs, err := lookupHost(d.ctx, s.Target)
+		lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
 		if err != nil {
 			grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
 			continue
@@ -224,7 +284,7 @@
 }
 
 func (d *dnsResolver) lookupTXT() string {
-	ss, err := lookupTXT(d.ctx, d.host)
+	ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
 	if err != nil {
 		grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
 		return ""
@@ -244,7 +304,7 @@
 
 func (d *dnsResolver) lookupHost() []resolver.Address {
 	var newAddrs []resolver.Address
-	addrs, err := lookupHost(d.ctx, d.host)
+	addrs, err := d.resolver.LookupHost(d.ctx, d.host)
 	if err != nil {
 		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
 		return nil
@@ -286,17 +346,16 @@
 	return "[" + addr + "]", true
 }
 
-// parseTarget takes the user input target string, returns formatted host and port info.
+// parseTarget takes the user input target string and default port, returns formatted host and port info.
 // If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets
-// are strippd when setting the host.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
 // examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-// target: ":" returns host: "localhost", port: "443"
-func parseTarget(target string) (host, port string, err error) {
+// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
+// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
+func parseTarget(target, defaultPort string) (host, port string, err error) {
 	if target == "" {
 		return "", "", errMissingAddr
 	}
@@ -305,15 +364,15 @@
 		return target, defaultPort, nil
 	}
 	if host, port, err = net.SplitHostPort(target); err == nil {
+		if port == "" {
+			// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
+			return "", "", errEndsWithColon
+		}
 		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
 		if host == "" {
 			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
 			host = "localhost"
 		}
-		if port == "" {
-			// If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
-			port = defaultPort
-		}
 		return host, port, nil
 	}
 	if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
@@ -346,7 +405,7 @@
 	if a == nil {
 		return true
 	}
-	return randomGen.Intn(100)+1 <= *a
+	return grpcrand.Intn(100)+1 <= *a
 }
 
 func canaryingSC(js string) string {
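
A self-contained mirror of the documented parseTarget behaviour above, including the new rule that a target ending in a bare colon is rejected rather than silently given the default port; splitTarget is an illustrative stand-in for the unexported function.

package main

import (
	"errors"
	"fmt"
	"net"
)

// splitTarget: an IP literal gets the default port, an empty host becomes
// "localhost", and a target ending in a bare colon (e.g. "[::1]:") is rejected.
func splitTarget(target, defaultPort string) (string, string, error) {
	if target == "" {
		return "", "", errors.New("missing address")
	}
	if ip := net.ParseIP(target); ip != nil {
		return target, defaultPort, nil // IP address without port
	}
	if host, port, err := net.SplitHostPort(target); err == nil {
		if port == "" {
			return "", "", errors.New("missing port after port-separator colon")
		}
		if host == "" {
			host = "localhost" // consistent with net.Dial for ":80"
		}
		return host, port, nil
	}
	if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
		return host, port, nil // host name or bracketed IPv6 without port
	}
	return "", "", fmt.Errorf("invalid target address %q", target)
}

func main() {
	fmt.Println(splitTarget("www.google.com", "443")) // www.google.com 443 <nil>
	fmt.Println(splitTarget("[::1]:8080", "443"))     // ::1 8080 <nil>
	fmt.Println(splitTarget(":80", "443"))            // localhost 80 <nil>
	fmt.Println(splitTarget("[::1]:", "443"))         // error: missing port
}
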
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go
deleted file mode 100644
index b466bc8..0000000
--- a/vendor/google.golang.org/grpc/resolver/dns/go17.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build go1.6, !go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import (
-	"net"
-
-	"golang.org/x/net/context"
-)
-
-var (
-	lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
-	lookupSRV  = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
-		return net.LookupSRV(service, proto, name)
-	}
-	lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
-)
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go
deleted file mode 100644
index fa34f14..0000000
--- a/vendor/google.golang.org/grpc/resolver/dns/go18.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import "net"
-
-var (
-	lookupHost = net.DefaultResolver.LookupHost
-	lookupSRV  = net.DefaultResolver.LookupSRV
-	lookupTXT  = net.DefaultResolver.LookupTXT
-)
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
index b76010d..893d5d1 100644
--- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
@@ -45,7 +45,7 @@
 }
 
 func (r *passthroughResolver) start() {
-	r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
+	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
 }
 
 func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 506afac..52ec603 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -49,8 +49,12 @@
 	return nil
 }
 
-// SetDefaultScheme sets the default scheme that will be used.
-// The default default scheme is "passthrough".
+// SetDefaultScheme sets the default scheme that will be used. The default
+// default scheme is "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
 func SetDefaultScheme(scheme string) {
 	defaultScheme = scheme
 }
@@ -94,6 +98,15 @@
 	DisableServiceConfig bool
 }
 
+// State contains the current Resolver state relevant to the ClientConn.
+type State struct {
+	Addresses     []Address // Resolved addresses for the target
+	ServiceConfig string    // JSON representation of the service config
+
+	// TODO: add Err error
+	// TODO: add ParsedServiceConfig interface{}
+}
+
 // ClientConn contains the callbacks for resolver to notify any updates
 // to the gRPC ClientConn.
 //
@@ -102,12 +115,18 @@
 // testing, the new implementation should embed this interface. This allows
 // gRPC to add new methods to this interface.
 type ClientConn interface {
+	// UpdateState updates the state of the ClientConn appropriately.
+	UpdateState(State)
 	// NewAddress is called by resolver to notify ClientConn a new list
 	// of resolved addresses.
 	// The address list should be the complete list of resolved addresses.
+	//
+	// Deprecated: Use UpdateState instead.
 	NewAddress(addresses []Address)
 	// NewServiceConfig is called by resolver to notify ClientConn a new
 	// service config. The service config should be provided as a json string.
+	//
+	// Deprecated: Use UpdateState instead.
 	NewServiceConfig(serviceConfig string)
 }
 
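
resolver.State and ClientConn.UpdateState replace the now-deprecated NewAddress and NewServiceConfig callbacks. As an illustration only, here is a minimal static resolver written against the interface above; the "static" scheme, the builder name, and the address list are invented for the example, and the BuildOption/ResolveNowOption parameter names follow this vendored revision (they were renamed in later gRPC releases).

package main

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder is a hypothetical resolver that reports a fixed address list
// through UpdateState, the non-deprecated path added above.
type staticBuilder struct {
	addrs []string
}

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	var state resolver.State
	for _, a := range b.addrs {
		state.Addresses = append(state.Addresses, resolver.Address{Addr: a})
	}
	cc.UpdateState(state) // instead of the deprecated cc.NewAddress(...)
	return &staticResolver{}, nil
}

func (b *staticBuilder) Scheme() string { return "static" }

type staticResolver struct{}

func (*staticResolver) ResolveNow(resolver.ResolveNowOption) {}
func (*staticResolver) Close()                               {}

func main() {
	// After registration, "static:///whatever" becomes a dialable target.
	resolver.Register(&staticBuilder{addrs: []string{"127.0.0.1:50051", "127.0.0.1:50052"}})
}
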
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index 1b493db..e9cef3a 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -21,8 +21,10 @@
 import (
 	"fmt"
 	"strings"
+	"sync/atomic"
 
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -33,11 +35,12 @@
 	resolver resolver.Resolver
 	addrCh   chan []resolver.Address
 	scCh     chan string
-	done     chan struct{}
+	done     uint32 // accessed atomically; set to 1 when closed.
+	curState resolver.State
 }
 
 // split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", s, false) instead.
+// If sep is not found, it returns ("", "", false) instead.
 func split2(s, sep string) (string, string, bool) {
 	spl := strings.SplitN(s, sep, 2)
 	if len(spl) < 2 {
@@ -65,8 +68,8 @@
 }
 
 // newCCResolverWrapper parses cc.target for scheme and gets the resolver
-// builder for this scheme. It then builds the resolver and starts the
-// monitoring goroutine for it.
+// builder for this scheme and builds the resolver. The monitoring goroutine
+// for it is not started yet and can be created by calling start().
 //
 // If withResolverBuilder dial option is set, the specified resolver will be
 // used instead.
@@ -80,7 +83,6 @@
 		cc:     cc,
 		addrCh: make(chan []resolver.Address, 1),
 		scCh:   make(chan string, 1),
-		done:   make(chan struct{}),
 	}
 
 	var err error
@@ -91,68 +93,73 @@
 	return ccr, nil
 }
 
-func (ccr *ccResolverWrapper) start() {
-	go ccr.watcher()
-}
-
-// watcher processes address updates and service config updates sequentially.
-// Otherwise, we need to resolve possible races between address and service
-// config (e.g. they specify different balancer types).
-func (ccr *ccResolverWrapper) watcher() {
-	for {
-		select {
-		case <-ccr.done:
-			return
-		default:
-		}
-
-		select {
-		case addrs := <-ccr.addrCh:
-			select {
-			case <-ccr.done:
-				return
-			default:
-			}
-			grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
-			ccr.cc.handleResolvedAddrs(addrs, nil)
-		case sc := <-ccr.scCh:
-			select {
-			case <-ccr.done:
-				return
-			default:
-			}
-			grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
-			ccr.cc.handleServiceConfig(sc)
-		case <-ccr.done:
-			return
-		}
-	}
-}
-
 func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
 	ccr.resolver.ResolveNow(o)
 }
 
 func (ccr *ccResolverWrapper) close() {
 	ccr.resolver.Close()
-	close(ccr.done)
+	atomic.StoreUint32(&ccr.done, 1)
 }
 
-// NewAddress is called by the resolver implemenetion to send addresses to gRPC.
+func (ccr *ccResolverWrapper) isDone() bool {
+	return atomic.LoadUint32(&ccr.done) == 1
+}
+
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
+	if ccr.isDone() {
+		return
+	}
+	grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
+	if channelz.IsOn() {
+		ccr.addChannelzTraceEvent(s)
+	}
+	ccr.cc.updateResolverState(s)
+	ccr.curState = s
+}
+
+// NewAddress is called by the resolver implementation to send addresses to gRPC.
 func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
-	select {
-	case <-ccr.addrCh:
-	default:
+	if ccr.isDone() {
+		return
 	}
-	ccr.addrCh <- addrs
+	grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
+	if channelz.IsOn() {
+		ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
+	}
+	ccr.curState.Addresses = addrs
+	ccr.cc.updateResolverState(ccr.curState)
 }
 
-// NewServiceConfig is called by the resolver implemenetion to send service
-// configs to gPRC.
+// NewServiceConfig is called by the resolver implementation to send service
+// configs to gRPC.
 func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
-	select {
-	case <-ccr.scCh:
-	default:
+	if ccr.isDone() {
+		return
 	}
-	ccr.scCh <- sc
+	grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
+	if channelz.IsOn() {
+		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc})
+	}
+	ccr.curState.ServiceConfig = sc
+	ccr.cc.updateResolverState(ccr.curState)
+}
+
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+	if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) {
+		return
+	}
+	var updates []string
+	if s.ServiceConfig != ccr.curState.ServiceConfig {
+		updates = append(updates, "service config updated")
+	}
+	if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
+		updates = append(updates, "resolver returned an empty address list")
+	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
+		updates = append(updates, "resolver returned new addresses")
+	}
+	channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
+		Desc:     fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
+		Severity: channelz.CtINFO,
+	})
 }
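
The rewritten wrapper drops the watcher goroutine, the addrCh/scCh channels, and the done channel in favour of a uint32 that is set once in close() and checked atomically before every callback is forwarded. The pattern in isolation, with nothing gRPC-specific about it:

package main

import (
	"fmt"
	"sync/atomic"
)

type wrapper struct {
	done uint32 // accessed atomically; set to 1 when closed
}

func (w *wrapper) close()       { atomic.StoreUint32(&w.done, 1) }
func (w *wrapper) isDone() bool { return atomic.LoadUint32(&w.done) == 1 }

// deliver drops updates that arrive after close, mirroring how UpdateState,
// NewAddress and NewServiceConfig return early once isDone() reports true.
func (w *wrapper) deliver(update string) {
	if w.isDone() {
		return
	}
	fmt.Println("delivering:", update)
}

func main() {
	w := &wrapper{}
	w.deliver("addresses: [10.0.0.1:443]")
	w.close()
	w.deliver("this update is silently dropped")
}
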
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 5de1b03..2a59562 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -21,6 +21,7 @@
 import (
 	"bytes"
 	"compress/gzip"
+	"context"
 	"encoding/binary"
 	"fmt"
 	"io"
@@ -31,16 +32,15 @@
 	"sync"
 	"time"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
 )
 
 // Compressor defines the interface gRPC uses to compress a message.
@@ -155,17 +155,20 @@
 type callInfo struct {
 	compressorType        string
 	failFast              bool
-	stream                *clientStream
-	traceInfo             traceInfo // in trace.go
+	stream                ClientStream
 	maxReceiveMessageSize *int
 	maxSendMessageSize    *int
 	creds                 credentials.PerRPCCredentials
 	contentSubtype        string
 	codec                 baseCodec
+	maxRetryRPCBufferSize int
 }
 
 func defaultCallInfo() *callInfo {
-	return &callInfo{failFast: true}
+	return &callInfo{
+		failFast:              true,
+		maxRetryRPCBufferSize: 256 * 1024, // 256KB
+	}
 }
 
 // CallOption configures a Call before it starts or extracts information from
@@ -250,8 +253,8 @@
 	}
 }
 
-// FailFast configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers.  If failFast is true, the RPC will fail
+// WaitForReady configures the action to take when an RPC is attempted on broken
+// connections or unreachable servers. If waitForReady is false, the RPC will fail
 // immediately. Otherwise, the RPC client will block the call until a
 // connection is available (or the call is canceled or times out) and will
 // retry the call if it fails due to a transient error.  gRPC will not retry if
@@ -259,7 +262,14 @@
 // the data.  Please refer to
 // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
 //
-// By default, RPCs are "Fail Fast".
+// By default, RPCs don't "wait for ready".
+func WaitForReady(waitForReady bool) CallOption {
+	return FailFastCallOption{FailFast: !waitForReady}
+}
+
+// FailFast is the opposite of WaitForReady.
+//
+// Deprecated: use WaitForReady.
 func FailFast(failFast bool) CallOption {
 	return FailFastCallOption{FailFast: failFast}
 }
@@ -360,13 +370,13 @@
 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
 // more details.
 //
-// If CallCustomCodec is not also used, the content-subtype will be used to
-// look up the Codec to use in the registry controlled by RegisterCodec. See
-// the documentation on RegisterCodec for details on registration. The lookup
-// of content-subtype is case-insensitive. If no such Codec is found, the call
+// If ForceCodec is not also used, the content-subtype will be used to look up
+// the Codec to use in the registry controlled by RegisterCodec. See the
+// documentation on RegisterCodec for details on registration. The lookup of
+// content-subtype is case-insensitive. If no such Codec is found, the call
 // will result in an error with code codes.Internal.
 //
-// If CallCustomCodec is also used, that Codec will be used for all request and
+// If ForceCodec is also used, that Codec will be used for all request and
 // response messages, with the content-subtype set to the given contentSubtype
 // here for requests.
 func CallContentSubtype(contentSubtype string) CallOption {
@@ -386,7 +396,7 @@
 }
 func (o ContentSubtypeCallOption) after(c *callInfo) {}
 
-// CallCustomCodec returns a CallOption that will set the given Codec to be
+// ForceCodec returns a CallOption that will set the given Codec to be
 // used for all request and response messages for a call. The result of calling
 // String() will be used as the content-subtype in a case-insensitive manner.
 //
@@ -398,12 +408,37 @@
 //
 // This function is provided for advanced users; prefer to use only
 // CallContentSubtype to select a registered codec instead.
+//
+// This is an EXPERIMENTAL API.
+func ForceCodec(codec encoding.Codec) CallOption {
+	return ForceCodecCallOption{Codec: codec}
+}
+
+// ForceCodecCallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// This is an EXPERIMENTAL API.
+type ForceCodecCallOption struct {
+	Codec encoding.Codec
+}
+
+func (o ForceCodecCallOption) before(c *callInfo) error {
+	c.codec = o.Codec
+	return nil
+}
+func (o ForceCodecCallOption) after(c *callInfo) {}
+
+// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
+// an encoding.Codec.
+//
+// Deprecated: use ForceCodec instead.
 func CallCustomCodec(codec Codec) CallOption {
 	return CustomCodecCallOption{Codec: codec}
 }
 
 // CustomCodecCallOption is a CallOption that indicates the codec used for
 // marshaling messages.
+//
 // This is an EXPERIMENTAL API.
 type CustomCodecCallOption struct {
 	Codec Codec
@@ -415,12 +450,33 @@
 }
 func (o CustomCodecCallOption) after(c *callInfo) {}
 
+// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
+// used for buffering this RPC's requests for retry purposes.
+//
+// This API is EXPERIMENTAL.
+func MaxRetryRPCBufferSize(bytes int) CallOption {
+	return MaxRetryRPCBufferSizeCallOption{bytes}
+}
+
+// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
+// memory to be used for caching this RPC for retry purposes.
+// This is an EXPERIMENTAL API.
+type MaxRetryRPCBufferSizeCallOption struct {
+	MaxRetryRPCBufferSize int
+}
+
+func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
+	c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
+	return nil
+}
+func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
+
 // The format of the payload: compressed or not?
 type payloadFormat uint8
 
 const (
-	compressionNone payloadFormat = iota // no compression
-	compressionMade
+	compressionNone payloadFormat = 0 // no compression
+	compressionMade payloadFormat = 1 // compressed
 )
 
 // parser reads complete gRPC messages from the underlying reader.
@@ -444,7 +500,7 @@
 //   * io.EOF, when no messages remain
 //   * io.ErrUnexpectedEOF
 //   * of type transport.ConnectionError
-//   * of type transport.StreamError
+//   * an error from the status package
 // No other error values or types must be returned, which also means
 // that the underlying io.Reader must not return an incompatible
 // error.
@@ -477,65 +533,85 @@
 	return pf, msg, nil
 }
 
-// encode serializes msg and returns a buffer of message header and a buffer of msg.
-// If msg is nil, it generates the message header and an empty msg buffer.
-// TODO(ddyihai): eliminate extra Compressor parameter.
-func encode(c baseCodec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
-	var (
-		b    []byte
-		cbuf *bytes.Buffer
-	)
-	const (
-		payloadLen = 1
-		sizeLen    = 4
-	)
-	if msg != nil {
-		var err error
-		b, err = c.Marshal(msg)
-		if err != nil {
-			return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
-		}
-		if outPayload != nil {
-			outPayload.Payload = msg
-			// TODO truncate large payload.
-			outPayload.Data = b
-			outPayload.Length = len(b)
-		}
-		if compressor != nil || cp != nil {
-			cbuf = new(bytes.Buffer)
-			// Has compressor, check Compressor is set by UseCompressor first.
-			if compressor != nil {
-				z, _ := compressor.Compress(cbuf)
-				if _, err := z.Write(b); err != nil {
-					return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
-				}
-				z.Close()
-			} else {
-				// If Compressor is not set by UseCompressor, use default Compressor
-				if err := cp.Do(cbuf, b); err != nil {
-					return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
-				}
-			}
-			b = cbuf.Bytes()
-		}
+// encode serializes msg and returns a buffer containing the message, or an
+// error if it is too large to be transmitted by grpc.  If msg is nil, it
+// generates an empty message.
+func encode(c baseCodec, msg interface{}) ([]byte, error) {
+	if msg == nil { // NOTE: typed nils will not be caught by this check
+		return nil, nil
+	}
+	b, err := c.Marshal(msg)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
 	}
 	if uint(len(b)) > math.MaxUint32 {
-		return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
 	}
+	return b, nil
+}
 
-	bufHeader := make([]byte, payloadLen+sizeLen)
-	if compressor != nil || cp != nil {
-		bufHeader[0] = byte(compressionMade)
+// compress returns the input bytes compressed by compressor or cp.  If both
+// compressors are nil, returns nil.
+//
+// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
+func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
+	if compressor == nil && cp == nil {
+		return nil, nil
+	}
+	wrapErr := func(err error) error {
+		return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+	}
+	cbuf := &bytes.Buffer{}
+	if compressor != nil {
+		z, err := compressor.Compress(cbuf)
+		if err != nil {
+			return nil, wrapErr(err)
+		}
+		if _, err := z.Write(in); err != nil {
+			return nil, wrapErr(err)
+		}
+		if err := z.Close(); err != nil {
+			return nil, wrapErr(err)
+		}
 	} else {
-		bufHeader[0] = byte(compressionNone)
+		if err := cp.Do(cbuf, in); err != nil {
+			return nil, wrapErr(err)
+		}
+	}
+	return cbuf.Bytes(), nil
+}
+
+const (
+	payloadLen = 1
+	sizeLen    = 4
+	headerLen  = payloadLen + sizeLen
+)
+
+// msgHeader returns a 5-byte header for the message being transmitted and the
+// payload, which is compData if non-nil or data otherwise.
+func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+	hdr = make([]byte, headerLen)
+	if compData != nil {
+		hdr[0] = byte(compressionMade)
+		data = compData
+	} else {
+		hdr[0] = byte(compressionNone)
 	}
 
-	// Write length of b into buf
-	binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b)))
-	if outPayload != nil {
-		outPayload.WireLength = payloadLen + sizeLen + len(b)
+	// Write length of payload into buf
+	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
+	return hdr, data
+}
+
+func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
+	return &stats.OutPayload{
+		Client:     client,
+		Payload:    msg,
+		Data:       data,
+		Length:     len(data),
+		WireLength: len(payload) + headerLen,
+		SentTime:   t,
 	}
-	return bufHeader, b, nil
 }
 
 func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
@@ -554,20 +630,22 @@
 	return nil
 }
 
-// For the two compressor parameters, both should not be set, but if they are,
-// dc takes precedence over compressor.
-// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
+type payloadInfo struct {
+	wireLength        int // The compressed length got from wire.
+	uncompressedBytes []byte
+}
+
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
 	pf, d, err := p.recvMsg(maxReceiveMessageSize)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	if inPayload != nil {
-		inPayload.WireLength = len(d)
+	if payInfo != nil {
+		payInfo.wireLength = len(d)
 	}
 
 	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
-		return st.Err()
+		return nil, st.Err()
 	}
 
 	if pf == compressionMade {
@@ -576,33 +654,42 @@
 		if dc != nil {
 			d, err = dc.Do(bytes.NewReader(d))
 			if err != nil {
-				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 			}
 		} else {
 			dcReader, err := compressor.Decompress(bytes.NewReader(d))
 			if err != nil {
-				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 			}
-			d, err = ioutil.ReadAll(dcReader)
+			// Read from LimitReader with limit max+1. So if the underlying
+			// reader is over limit, the result will be bigger than max.
+			d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
 			if err != nil {
-				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 			}
 		}
 	}
 	if len(d) > maxReceiveMessageSize {
 		// TODO: Revisit the error code. Currently keep it consistent with java
 		// implementation.
-		return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+	}
+	return d, nil
+}
+
+// For the two compressor parameters, both should not be set, but if they are,
+// dc takes precedence over compressor.
+// TODO(dfawley): wrap the old compressor/decompressor using the new API?
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
+	d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+	if err != nil {
+		return err
 	}
 	if err := c.Unmarshal(d, m); err != nil {
 		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
 	}
-	if inPayload != nil {
-		inPayload.RecvTime = time.Now()
-		inPayload.Payload = m
-		// TODO truncate large payload.
-		inPayload.Data = d
-		inPayload.Length = len(d)
+	if payInfo != nil {
+		payInfo.uncompressedBytes = d
 	}
 	return nil
 }
@@ -625,23 +712,17 @@
 // Code returns the error code for err if it was produced by the rpc system.
 // Otherwise, it returns codes.Unknown.
 //
-// Deprecated: use status.FromError and Code method instead.
+// Deprecated: use status.Code instead.
 func Code(err error) codes.Code {
-	if s, ok := status.FromError(err); ok {
-		return s.Code()
-	}
-	return codes.Unknown
+	return status.Code(err)
 }
 
 // ErrorDesc returns the error description of err if it was produced by the rpc system.
 // Otherwise, it returns err.Error() or empty string when err is nil.
 //
-// Deprecated: use status.FromError and Message method instead.
+// Deprecated: use status.Convert and Message method instead.
 func ErrorDesc(err error) string {
-	if s, ok := status.FromError(err); ok {
-		return s.Message()
-	}
-	return err.Error()
+	return status.Convert(err).Message()
 }
 
 // Errorf returns an error containing an error code and a description;
@@ -652,6 +733,31 @@
 	return status.Errorf(c, format, a...)
 }
 
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	if err == nil || err == io.EOF {
+		return err
+	}
+	if err == io.ErrUnexpectedEOF {
+		return status.Error(codes.Internal, err.Error())
+	}
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+	switch e := err.(type) {
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	default:
+		switch err {
+		case context.DeadlineExceeded:
+			return status.Error(codes.DeadlineExceeded, err.Error())
+		case context.Canceled:
+			return status.Error(codes.Canceled, err.Error())
+		}
+	}
+	return status.Error(codes.Unknown, err.Error())
+}
+
 // setCallInfoCodec should only be called after CallOptions have been applied.
 func setCallInfoCodec(c *callInfo) error {
 	if c.codec != nil {
@@ -707,6 +813,19 @@
 	return net, target
 }
 
+// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
+// These fields cannot be embedded in the original structs (e.g. ClientConn), because atomic
+// operations on an int64 variable on a 32-bit machine require the user to enforce memory alignment.
+// Grouping the int64 fields inside this struct enforces that alignment.
+type channelzData struct {
+	callsStarted   int64
+	callsFailed    int64
+	callsSucceeded int64
+	// lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
+	// time.Time since it's more costly to atomically update time.Time variable than int64 variable.
+	lastCallStartedTime int64
+}
+
 // The SupportPackageIsVersion variables are referenced from generated protocol
 // buffer files to ensure compatibility with the gRPC version used.  The latest
 // support package version is 5.
@@ -721,7 +840,4 @@
 	SupportPackageIsVersion5 = true
 )
 
-// Version is the current grpc version.
-const Version = "1.12.2"
-
 const grpcUA = "grpc-go/" + Version
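
encode, compress, and msgHeader now split what the old encode did into three steps: marshal, optionally compress, then frame. The frame is a 5-byte header, one byte for the compression flag followed by the big-endian payload length, and the payload is the compressed bytes when compression ran, otherwise the marshaled bytes. A self-contained sketch of that framing; the "hello" payload stands in for a marshaled protobuf message.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	payloadLen = 1
	sizeLen    = 4
	headerLen  = payloadLen + sizeLen
)

// frame builds the 5-byte gRPC message header the way msgHeader does above:
// byte 0 is the compression flag, bytes 1-4 hold the big-endian payload length.
func frame(data, compData []byte) (hdr, payload []byte) {
	hdr = make([]byte, headerLen)
	if compData != nil {
		hdr[0] = 1 // compressionMade
		data = compData
	} else {
		hdr[0] = 0 // compressionNone
	}
	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
	return hdr, data
}

func main() {
	msg := []byte("hello")          // stand-in for a marshaled protobuf message
	hdr, payload := frame(msg, nil) // no compression
	fmt.Printf("header % x, payload %q\n", hdr, payload)
	// header 00 00 00 00 05, payload "hello"
}

On the call-option side of the same file, grpc.WaitForReady(true) is the new spelling of grpc.FailFast(false), and MaxRetryRPCBufferSize caps how much of a request gRPC will buffer for retry purposes (256KB by default, per defaultCallInfo above).
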
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 4969331..8115828 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -19,7 +19,7 @@
 package grpc
 
 import (
-	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -30,27 +30,25 @@
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
-	"io/ioutil"
-
-	"golang.org/x/net/context"
-	"golang.org/x/net/http2"
 	"golang.org/x/net/trace"
 
-	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/tap"
-	"google.golang.org/grpc/transport"
 )
 
 const (
@@ -106,12 +104,8 @@
 	channelzRemoveOnce sync.Once
 	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
 
-	channelzID          int64 // channelz unique identification number
-	czmu                sync.RWMutex
-	callsStarted        int64
-	callsFailed         int64
-	callsSucceeded      int64
-	lastCallStartedTime time.Time
+	channelzID int64 // channelz unique identification number
+	czData     *channelzData
 }
 
 type options struct {
@@ -126,7 +120,6 @@
 	maxConcurrentStreams  uint32
 	maxReceiveMessageSize int
 	maxSendMessageSize    int
-	useHandlerImpl        bool // use http.Handler-based server
 	unknownStreamDesc     *StreamDesc
 	keepaliveParams       keepalive.ServerParameters
 	keepalivePolicy       keepalive.EnforcementPolicy
@@ -135,19 +128,25 @@
 	writeBufferSize       int
 	readBufferSize        int
 	connectionTimeout     time.Duration
+	maxHeaderListSize     *uint32
 }
 
 var defaultServerOptions = options{
 	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
 	maxSendMessageSize:    defaultServerMaxSendMessageSize,
 	connectionTimeout:     120 * time.Second,
+	writeBufferSize:       defaultWriteBufSize,
+	readBufferSize:        defaultReadBufSize,
 }
 
 // A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
 type ServerOption func(*options)
 
-// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched
-// before doing a write on the wire.
+// WriteBufferSize determines how much data can be batched before doing a write on the wire.
+// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
+// The default value for this buffer is 32KB.
+// Zero will disable the write buffer so that each write goes directly to the underlying connection.
+// Note: A Send call may not directly translate to a write.
 func WriteBufferSize(s int) ServerOption {
 	return func(o *options) {
 		o.writeBufferSize = s
@@ -156,6 +155,9 @@
 
 // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
 // for one read syscall.
+// The default value for this buffer is 32KB.
+// Zero will disable read buffer for a connection so data framer can access the underlying
+// conn directly.
 func ReadBufferSize(s int) ServerOption {
 	return func(o *options) {
 		o.readBufferSize = s
@@ -180,6 +182,11 @@
 
 // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
 func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
+	if kp.Time > 0 && kp.Time < time.Second {
+		grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+		kp.Time = time.Second
+	}
+
 	return func(o *options) {
 		o.keepaliveParams = kp
 	}
@@ -242,7 +249,7 @@
 }
 
 // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
-// If this is not set, gRPC uses the default 4MB.
+// If this is not set, gRPC uses the default `math.MaxInt32`.
 func MaxSendMsgSize(m int) ServerOption {
 	return func(o *options) {
 		o.maxSendMessageSize = m
@@ -335,6 +342,14 @@
 	}
 }
 
+// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
+// of header list that the server is prepared to accept.
+func MaxHeaderListSize(s uint32) ServerOption {
+	return func(o *options) {
+		o.maxHeaderListSize = &s
+	}
+}
+
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
@@ -343,12 +358,13 @@
 		o(&opts)
 	}
 	s := &Server{
-		lis:   make(map[net.Listener]bool),
-		opts:  opts,
-		conns: make(map[io.Closer]bool),
-		m:     make(map[string]*service),
-		quit:  make(chan struct{}),
-		done:  make(chan struct{}),
+		lis:    make(map[net.Listener]bool),
+		opts:   opts,
+		conns:  make(map[io.Closer]bool),
+		m:      make(map[string]*service),
+		quit:   make(chan struct{}),
+		done:   make(chan struct{}),
+		czData: new(channelzData),
 	}
 	s.cv = sync.NewCond(&s.mu)
 	if EnableTracing {
@@ -357,7 +373,7 @@
 	}
 
 	if channelz.IsOn() {
-		s.channelzID = channelz.RegisterServer(s, "")
+		s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
 	}
 	return s
 }
@@ -481,7 +497,8 @@
 
 func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
 	return &channelz.SocketInternalMetric{
-		LocalAddr: l.Listener.Addr(),
+		SocketOptions: channelz.GetSocketOption(l.Listener),
+		LocalAddr:     l.Listener.Addr(),
 	}
 }
 
@@ -525,7 +542,7 @@
 	s.lis[ls] = true
 
 	if channelz.IsOn() {
-		ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "")
+		ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
 	}
 	s.mu.Unlock()
 
@@ -597,12 +614,13 @@
 	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
 	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
 	if err != nil {
-		s.mu.Lock()
-		s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
-		s.mu.Unlock()
-		grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
-		// If serverHandshake returns ErrConnDispatched, keep rawConn open.
+		// ErrConnDispatched means that the connection was dispatched away from
+		// gRPC; those connections should be left open.
 		if err != credentials.ErrConnDispatched {
+			s.mu.Lock()
+			s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+			s.mu.Unlock()
+			grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
 			rawConn.Close()
 		}
 		rawConn.SetDeadline(time.Time{})
@@ -617,27 +635,19 @@
 	}
 	s.mu.Unlock()
 
-	var serve func()
-	c := conn.(io.Closer)
-	if s.opts.useHandlerImpl {
-		serve = func() { s.serveUsingHandler(conn) }
-	} else {
-		// Finish handshaking (HTTP2)
-		st := s.newHTTP2Transport(conn, authInfo)
-		if st == nil {
-			return
-		}
-		c = st
-		serve = func() { s.serveStreams(st) }
+	// Finish handshaking (HTTP2)
+	st := s.newHTTP2Transport(conn, authInfo)
+	if st == nil {
+		return
 	}
 
 	rawConn.SetDeadline(time.Time{})
-	if !s.addConn(c) {
+	if !s.addConn(st) {
 		return
 	}
 	go func() {
-		serve()
-		s.removeConn(c)
+		s.serveStreams(st)
+		s.removeConn(st)
 	}()
 }
 
@@ -656,6 +666,7 @@
 		WriteBufferSize:       s.opts.writeBufferSize,
 		ReadBufferSize:        s.opts.readBufferSize,
 		ChannelzParentID:      s.channelzID,
+		MaxHeaderListSize:     s.opts.maxHeaderListSize,
 	}
 	st, err := transport.NewServerTransport("http2", c, config)
 	if err != nil {
@@ -691,27 +702,6 @@
 
 var _ http.Handler = (*Server)(nil)
 
-// serveUsingHandler is called from handleRawConn when s is configured
-// to handle requests via the http.Handler interface. It sets up a
-// net/http.Server to handle the just-accepted conn. The http.Server
-// is configured to route all incoming requests (all HTTP/2 streams)
-// to ServeHTTP, which creates a new ServerTransport for each stream.
-// serveUsingHandler blocks until conn closes.
-//
-// This codepath is only used when Server.TestingUseHandlerImpl has
-// been configured. This lets the end2end tests exercise the ServeHTTP
-// method as one of the environment types.
-//
-// conn is the *tls.Conn that's already been authenticated.
-func (s *Server) serveUsingHandler(conn net.Conn) {
-	h2s := &http2.Server{
-		MaxConcurrentStreams: s.opts.maxConcurrentStreams,
-	}
-	h2s.ServeConn(conn, &http2.ServeConnOpts{
-		Handler: s,
-	})
-}
-
 // ServeHTTP implements the Go standard library's http.Handler
 // interface by responding to the gRPC request r, by looking up
 // the requested gRPC method in the gRPC server s.
@@ -759,12 +749,13 @@
 
 	trInfo = &traceInfo{
 		tr: tr,
+		firstLine: firstLine{
+			client:     false,
+			remoteAddr: st.RemoteAddr(),
+		},
 	}
-	trInfo.firstLine.client = false
-	trInfo.firstLine.remoteAddr = st.RemoteAddr()
-
 	if dl, ok := stream.Context().Deadline(); ok {
-		trInfo.firstLine.deadline = dl.Sub(time.Now())
+		trInfo.firstLine.deadline = time.Until(dl)
 	}
 	return trInfo
 }
@@ -794,57 +785,47 @@
 	}
 }
 
-// ChannelzMetric returns ServerInternalMetric of current server.
-// This is an EXPERIMENTAL API.
-func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric {
-	s.czmu.RLock()
-	defer s.czmu.RUnlock()
+func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
 	return &channelz.ServerInternalMetric{
-		CallsStarted:             s.callsStarted,
-		CallsSucceeded:           s.callsSucceeded,
-		CallsFailed:              s.callsFailed,
-		LastCallStartedTimestamp: s.lastCallStartedTime,
+		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
+		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
+		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
+		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
 	}
 }
 
 func (s *Server) incrCallsStarted() {
-	s.czmu.Lock()
-	s.callsStarted++
-	s.lastCallStartedTime = time.Now()
-	s.czmu.Unlock()
+	atomic.AddInt64(&s.czData.callsStarted, 1)
+	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
 }
 
 func (s *Server) incrCallsSucceeded() {
-	s.czmu.Lock()
-	s.callsSucceeded++
-	s.czmu.Unlock()
+	atomic.AddInt64(&s.czData.callsSucceeded, 1)
 }
 
 func (s *Server) incrCallsFailed() {
-	s.czmu.Lock()
-	s.callsFailed++
-	s.czmu.Unlock()
+	atomic.AddInt64(&s.czData.callsFailed, 1)
 }
 
 func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
-	var (
-		outPayload *stats.OutPayload
-	)
-	if s.opts.statsHandler != nil {
-		outPayload = &stats.OutPayload{}
-	}
-	hdr, data, err := encode(s.getCodec(stream.ContentSubtype()), msg, cp, outPayload, comp)
+	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
 		grpclog.Errorln("grpc: server failed to encode response: ", err)
 		return err
 	}
-	if len(data) > s.opts.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize)
+	compData, err := compress(data, cp, comp)
+	if err != nil {
+		grpclog.Errorln("grpc: server failed to compress response: ", err)
+		return err
 	}
-	err = t.Write(stream, hdr, data, opts)
-	if err == nil && outPayload != nil {
-		outPayload.SentTime = time.Now()
-		s.opts.statsHandler.HandleRPC(stream.Context(), outPayload)
+	hdr, payload := msgHeader(data, compData)
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payload) > s.opts.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+	}
+	err = t.Write(stream, hdr, payload, opts)
+	if err == nil && s.opts.statsHandler != nil {
+		s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
 	}
 	return err
 }
@@ -880,7 +861,6 @@
 	}
 	if trInfo != nil {
 		defer trInfo.tr.Finish()
-		trInfo.firstLine.client = false
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 		defer func() {
 			if err != nil && err != io.EOF {
@@ -890,6 +870,30 @@
 		}()
 	}
 
+	binlog := binarylog.GetMethodLogger(stream.Method())
+	if binlog != nil {
+		ctx := stream.Context()
+		md, _ := metadata.FromIncomingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			Header:     md,
+			MethodName: stream.Method(),
+			PeerAddr:   nil,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		if a := md[":authority"]; len(a) > 0 {
+			logEntry.Authority = a[0]
+		}
+		if peer, ok := peer.FromContext(ctx); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		binlog.Log(logEntry)
+	}
+
 	// comp and cp are used for compression.  decomp and dc are used for
 	// decompression.  If comp and decomp are both set, they are the same;
 	// however they are kept separate to ensure that at most one of the
@@ -926,81 +930,38 @@
 		}
 	}
 
-	p := &parser{r: stream}
-	pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
-	if err == io.EOF {
-		// The entire stream is done (for unary RPC only).
-		return err
+	var payInfo *payloadInfo
+	if sh != nil || binlog != nil {
+		payInfo = &payloadInfo{}
 	}
-	if err == io.ErrUnexpectedEOF {
-		err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
-	}
+	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
 	if err != nil {
 		if st, ok := status.FromError(err); ok {
 			if e := t.WriteStatus(stream, st); e != nil {
 				grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
 			}
-		} else {
-			switch st := err.(type) {
-			case transport.ConnectionError:
-				// Nothing to do here.
-			case transport.StreamError:
-				if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
-					grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
-				}
-			default:
-				panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
-			}
 		}
 		return err
 	}
 	if channelz.IsOn() {
 		t.IncrMsgRecv()
 	}
-	if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
-		if e := t.WriteStatus(stream, st); e != nil {
-			grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
-		}
-		return st.Err()
-	}
-	var inPayload *stats.InPayload
-	if sh != nil {
-		inPayload = &stats.InPayload{
-			RecvTime: time.Now(),
-		}
-	}
 	df := func(v interface{}) error {
-		if inPayload != nil {
-			inPayload.WireLength = len(req)
-		}
-		if pf == compressionMade {
-			var err error
-			if dc != nil {
-				req, err = dc.Do(bytes.NewReader(req))
-				if err != nil {
-					return status.Errorf(codes.Internal, err.Error())
-				}
-			} else {
-				tmp, _ := decomp.Decompress(bytes.NewReader(req))
-				req, err = ioutil.ReadAll(tmp)
-				if err != nil {
-					return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-				}
-			}
-		}
-		if len(req) > s.opts.maxReceiveMessageSize {
-			// TODO: Revisit the error code. Currently keep it consistent with
-			// java implementation.
-			return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
-		}
-		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil {
+		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
-		if inPayload != nil {
-			inPayload.Payload = v
-			inPayload.Data = req
-			inPayload.Length = len(req)
-			sh.HandleRPC(stream.Context(), inPayload)
+		if sh != nil {
+			sh.HandleRPC(stream.Context(), &stats.InPayload{
+				RecvTime: time.Now(),
+				Payload:  v,
+				Data:     d,
+				Length:   len(d),
+			})
+		}
+		if binlog != nil {
+			binlog.Log(&binarylog.ClientMessage{
+				Message: d,
+			})
 		}
 		if trInfo != nil {
 			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
@@ -1023,15 +984,25 @@
 		if e := t.WriteStatus(stream, appStatus); e != nil {
 			grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
+		if binlog != nil {
+			if h, _ := stream.Header(); h.Len() > 0 {
+				// Only log serverHeader if there was header. Otherwise it can
+				// be trailer only.
+				binlog.Log(&binarylog.ServerHeader{
+					Header: h,
+				})
+			}
+			binlog.Log(&binarylog.ServerTrailer{
+				Trailer: stream.Trailer(),
+				Err:     appErr,
+			})
+		}
 		return appErr
 	}
 	if trInfo != nil {
 		trInfo.tr.LazyLog(stringer("OK"), false)
 	}
-	opts := &transport.Options{
-		Last:  true,
-		Delay: false,
-	}
+	opts := &transport.Options{Last: true}
 
 	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
 		if err == io.EOF {
@@ -1046,16 +1017,31 @@
 			switch st := err.(type) {
 			case transport.ConnectionError:
 				// Nothing to do here.
-			case transport.StreamError:
-				if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
-					grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
-				}
 			default:
 				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
 			}
 		}
+		if binlog != nil {
+			h, _ := stream.Header()
+			binlog.Log(&binarylog.ServerHeader{
+				Header: h,
+			})
+			binlog.Log(&binarylog.ServerTrailer{
+				Trailer: stream.Trailer(),
+				Err:     appErr,
+			})
+		}
 		return err
 	}
+	if binlog != nil {
+		h, _ := stream.Header()
+		binlog.Log(&binarylog.ServerHeader{
+			Header: h,
+		})
+		binlog.Log(&binarylog.ServerMessage{
+			Message: reply,
+		})
+	}
 	if channelz.IsOn() {
 		t.IncrMsgSent()
 	}
@@ -1065,7 +1051,14 @@
 	// TODO: Should we be logging if writing status failed here, like above?
 	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
 	// error or allow the stats handler to see it?
-	return t.WriteStatus(stream, status.New(codes.OK, ""))
+	err = t.WriteStatus(stream, status.New(codes.OK, ""))
+	if binlog != nil {
+		binlog.Log(&binarylog.ServerTrailer{
+			Trailer: stream.Trailer(),
+			Err:     appErr,
+		})
+	}
+	return err
 }
 
 func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
@@ -1099,17 +1092,40 @@
 	}
 	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
 	ss := &serverStream{
-		ctx:   ctx,
-		t:     t,
-		s:     stream,
-		p:     &parser{r: stream},
-		codec: s.getCodec(stream.ContentSubtype()),
+		ctx:                   ctx,
+		t:                     t,
+		s:                     stream,
+		p:                     &parser{r: stream},
+		codec:                 s.getCodec(stream.ContentSubtype()),
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
 		trInfo:                trInfo,
 		statsHandler:          sh,
 	}
 
+	ss.binlog = binarylog.GetMethodLogger(stream.Method())
+	if ss.binlog != nil {
+		md, _ := metadata.FromIncomingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			Header:     md,
+			MethodName: stream.Method(),
+			PeerAddr:   nil,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		if a := md[":authority"]; len(a) > 0 {
+			logEntry.Authority = a[0]
+		}
+		if peer, ok := peer.FromContext(ss.Context()); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		ss.binlog.Log(logEntry)
+	}
+
 	// If dc is set and matches the stream's compression, use it.  Otherwise, try
 	// to find a matching registered compressor for decomp.
 	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
@@ -1169,12 +1185,7 @@
 	if appErr != nil {
 		appStatus, ok := status.FromError(appErr)
 		if !ok {
-			switch err := appErr.(type) {
-			case transport.StreamError:
-				appStatus = status.New(err.Code, err.Desc)
-			default:
-				appStatus = status.New(codes.Unknown, appErr.Error())
-			}
+			appStatus = status.New(codes.Unknown, appErr.Error())
 			appErr = appStatus.Err()
 		}
 		if trInfo != nil {
@@ -1184,6 +1195,12 @@
 			ss.mu.Unlock()
 		}
 		t.WriteStatus(ss.s, appStatus)
+		if ss.binlog != nil {
+			ss.binlog.Log(&binarylog.ServerTrailer{
+				Trailer: ss.s.Trailer(),
+				Err:     appErr,
+			})
+		}
 		// TODO: Should we log an error from WriteStatus here and below?
 		return appErr
 	}
@@ -1192,7 +1209,14 @@
 		ss.trInfo.tr.LazyLog(stringer("OK"), false)
 		ss.mu.Unlock()
 	}
-	return t.WriteStatus(ss.s, status.New(codes.OK, ""))
+	err = t.WriteStatus(ss.s, status.New(codes.OK, ""))
+	if ss.binlog != nil {
+		ss.binlog.Log(&binarylog.ServerTrailer{
+			Trailer: ss.s.Trailer(),
+			Err:     appErr,
+		})
+	}
+	return err
 }
 
 func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
@@ -1221,47 +1245,33 @@
 	}
 	service := sm[:pos]
 	method := sm[pos+1:]
-	srv, ok := s.m[service]
-	if !ok {
-		if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
-			s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+
+	srv, knownService := s.m[service]
+	if knownService {
+		if md, ok := srv.md[method]; ok {
+			s.processUnaryRPC(t, stream, srv, md, trInfo)
 			return
 		}
-		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
-			trInfo.tr.SetError()
+		if sd, ok := srv.sd[method]; ok {
+			s.processStreamingRPC(t, stream, srv, sd, trInfo)
+			return
 		}
-		errDesc := fmt.Sprintf("unknown service %v", service)
-		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
-			if trInfo != nil {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				trInfo.tr.SetError()
-			}
-			grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
-		}
-		if trInfo != nil {
-			trInfo.tr.Finish()
-		}
-		return
 	}
-	// Unary RPC or Streaming RPC?
-	if md, ok := srv.md[method]; ok {
-		s.processUnaryRPC(t, stream, srv, md, trInfo)
-		return
-	}
-	if sd, ok := srv.sd[method]; ok {
-		s.processStreamingRPC(t, stream, srv, sd, trInfo)
-		return
-	}
-	if trInfo != nil {
-		trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
-		trInfo.tr.SetError()
-	}
+	// Unknown service, or known server unknown method.
 	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
 		s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
 		return
 	}
-	errDesc := fmt.Sprintf("unknown method %v", method)
+	var errDesc string
+	if !knownService {
+		errDesc = fmt.Sprintf("unknown service %v", service)
+	} else {
+		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyPrintf("%s", errDesc)
+		trInfo.tr.SetError()
+	}
 	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
 		if trInfo != nil {
 			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
@@ -1410,12 +1420,6 @@
 	s.mu.Unlock()
 }
 
-func init() {
-	internal.TestingUseHandlerImpl = func(arg interface{}) {
-		arg.(*Server).opts.useHandlerImpl = true
-	}
-}
-
 // contentSubtype must be lowercase
 // cannot return nil
 func (s *Server) getCodec(contentSubtype string) baseCodec {
@@ -1484,3 +1488,11 @@
 	}
 	return s.Method(), true
 }
+
+type channelzServer struct {
+	s *Server
+}
+
+func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
+	return c.s.channelzMetric()
+}
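
On the server side this revision adds MaxHeaderListSize, documents the 32KB defaults for the read and write buffers, and raises sub-second keepalive ping intervals to one second with a warning. A sketch of wiring those options into NewServer; the listen address and the sizes are illustrative values, not taken from this PR.

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	srv := grpc.NewServer(
		grpc.MaxHeaderListSize(1<<20), // cap on uncompressed header list size, added in this revision
		grpc.WriteBufferSize(64*1024), // default is 32KB; 0 disables write batching
		grpc.ReadBufferSize(64*1024),  // default is 32KB; 0 reads straight from the connection
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time: 500 * time.Millisecond, // raised to the 1s minimum with a warning
		}),
	)

	// Register services here, then serve until the listener is closed.
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
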
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 015631d..1c52274 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -25,6 +25,7 @@
 	"strings"
 	"time"
 
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/grpclog"
 )
 
@@ -56,6 +57,8 @@
 	// MaxRespSize is the maximum allowed payload size for an individual response in a
 	// stream (server->client) in bytes.
 	MaxRespSize *int
+	// RetryPolicy configures retry options for the method.
+	retryPolicy *retryPolicy
 }
 
 // ServiceConfig is provided by the service provider and contains parameters for how
@@ -68,13 +71,96 @@
 	// LB is the load balancer the service providers recommends. The balancer specified
 	// via grpc.WithBalancer will override this.
 	LB *string
-	// Methods contains a map for the methods in this service.
-	// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
-	// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
-	// Otherwise, the method has no MethodConfig to use.
+
+	// Methods contains a map for the methods in this service.  If there is an
+	// exact match for a method (i.e. /service/method) in the map, use the
+	// corresponding MethodConfig.  If there's no exact match, look for the
+	// default config for the service (/service/) and use the corresponding
+	// MethodConfig if it exists.  Otherwise, the method has no MethodConfig to
+	// use.
 	Methods map[string]MethodConfig
 
-	stickinessMetadataKey *string
+	// If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+	// retry attempts and hedged RPCs when the client’s ratio of failures to
+	// successes exceeds a threshold.
+	//
+	// For each server name, the gRPC client will maintain a token_count which is
+	// initially set to maxTokens, and can take values between 0 and maxTokens.
+	//
+	// Every outgoing RPC (regardless of service or method invoked) will change
+	// token_count as follows:
+	//
+	//   - Every failed RPC will decrement the token_count by 1.
+	//   - Every successful RPC will increment the token_count by tokenRatio.
+	//
+	// If token_count is less than or equal to maxTokens / 2, then RPCs will not
+	// be retried and hedged RPCs will not be sent.
+	retryThrottling *retryThrottlingPolicy
+	// healthCheckConfig is one of the requirements for enabling LB channel
+	// health checks.
+	healthCheckConfig *healthCheckConfig
+	// rawJSONString stores the service config JSON string that was parsed into
+	// this service config struct.
+	rawJSONString string
+}
+
+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+	// serviceName is the service name to use in the health-checking request.
+	ServiceName string
+}
+
+// retryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryPolicy struct {
+	// MaxAttempts is the maximum number of attempts, including the original RPC.
+	//
+	// This field is required and must be two or greater.
+	maxAttempts int
+
+	// Exponential backoff parameters. The initial retry attempt will occur at
+	// random(0, initialBackoffMS). In general, the nth attempt will occur at
+	// random(0,
+	//   min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
+	//
+	// These fields are required and must be greater than zero.
+	initialBackoff    time.Duration
+	maxBackoff        time.Duration
+	backoffMultiplier float64
+
+	// The set of status codes which may be retried.
+	//
+	// Status codes are specified as strings, e.g., "UNAVAILABLE".
+	//
+	// This field is required and must be non-empty.
+	// Note: a set is used to store this for easy lookup.
+	retryableStatusCodes map[codes.Code]bool
+}
+
+type jsonRetryPolicy struct {
+	MaxAttempts          int
+	InitialBackoff       string
+	MaxBackoff           string
+	BackoffMultiplier    float64
+	RetryableStatusCodes []codes.Code
+}
+
+// retryThrottlingPolicy defines the go-native version of the retry throttling
+// policy defined by the service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryThrottlingPolicy struct {
+	// The number of tokens starts at maxTokens. The token_count will always be
+	// between 0 and maxTokens.
+	//
+	// This field is required and must be greater than zero.
+	MaxTokens float64
+	// The amount of tokens to add on each successful RPC. Typically this will
+	// be some number between 0 and 1, e.g., 0.1.
+	//
+	// This field is required and must be greater than zero. Up to 3 decimal
+	// places are supported.
+	TokenRatio float64
 }
 
 func parseDuration(s *string) (*time.Duration, error) {
@@ -144,30 +230,33 @@
 	Timeout                 *string
 	MaxRequestMessageBytes  *int64
 	MaxResponseMessageBytes *int64
+	RetryPolicy             *jsonRetryPolicy
 }
 
 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
 type jsonSC struct {
-	LoadBalancingPolicy   *string
-	StickinessMetadataKey *string
-	MethodConfig          *[]jsonMC
+	LoadBalancingPolicy *string
+	MethodConfig        *[]jsonMC
+	RetryThrottling     *retryThrottlingPolicy
+	HealthCheckConfig   *healthCheckConfig
 }
 
-func parseServiceConfig(js string) (ServiceConfig, error) {
+func parseServiceConfig(js string) (*ServiceConfig, error) {
 	var rsc jsonSC
 	err := json.Unmarshal([]byte(js), &rsc)
 	if err != nil {
 		grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-		return ServiceConfig{}, err
+		return nil, err
 	}
 	sc := ServiceConfig{
-		LB:      rsc.LoadBalancingPolicy,
-		Methods: make(map[string]MethodConfig),
-
-		stickinessMetadataKey: rsc.StickinessMetadataKey,
+		LB:                rsc.LoadBalancingPolicy,
+		Methods:           make(map[string]MethodConfig),
+		retryThrottling:   rsc.RetryThrottling,
+		healthCheckConfig: rsc.HealthCheckConfig,
+		rawJSONString:     js,
 	}
 	if rsc.MethodConfig == nil {
-		return sc, nil
+		return &sc, nil
 	}
 
 	for _, m := range *rsc.MethodConfig {
@@ -177,13 +266,17 @@
 		d, err := parseDuration(m.Timeout)
 		if err != nil {
 			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-			return ServiceConfig{}, err
+			return nil, err
 		}
 
 		mc := MethodConfig{
 			WaitForReady: m.WaitForReady,
 			Timeout:      d,
 		}
+		if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+			return nil, err
+		}
 		if m.MaxRequestMessageBytes != nil {
 			if *m.MaxRequestMessageBytes > int64(maxInt) {
 				mc.MaxReqSize = newInt(maxInt)
@@ -205,7 +298,54 @@
 		}
 	}
 
-	return sc, nil
+	if sc.retryThrottling != nil {
+		if sc.retryThrottling.MaxTokens <= 0 ||
+			sc.retryThrottling.MaxTokens > 1000 ||
+			sc.retryThrottling.TokenRatio <= 0 {
+			// Illegal throttling config; disable throttling.
+			sc.retryThrottling = nil
+		}
+	}
+	return &sc, nil
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
+	if jrp == nil {
+		return nil, nil
+	}
+	ib, err := parseDuration(&jrp.InitialBackoff)
+	if err != nil {
+		return nil, err
+	}
+	mb, err := parseDuration(&jrp.MaxBackoff)
+	if err != nil {
+		return nil, err
+	}
+
+	if jrp.MaxAttempts <= 1 ||
+		*ib <= 0 ||
+		*mb <= 0 ||
+		jrp.BackoffMultiplier <= 0 ||
+		len(jrp.RetryableStatusCodes) == 0 {
+		grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+		return nil, nil
+	}
+
+	rp := &retryPolicy{
+		maxAttempts:          jrp.MaxAttempts,
+		initialBackoff:       *ib,
+		maxBackoff:           *mb,
+		backoffMultiplier:    jrp.BackoffMultiplier,
+		retryableStatusCodes: make(map[codes.Code]bool),
+	}
+	if rp.maxAttempts > 5 {
+		// TODO(retry): Make the max maxAttempts configurable.
+		rp.maxAttempts = 5
+	}
+	for _, code := range jrp.RetryableStatusCodes {
+		rp.retryableStatusCodes[code] = true
+	}
+	return rp, nil
 }
 
 func min(a, b *int) *int {
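The fields above map directly onto the retry section of a JSON service config. The following is a minimal, illustrative sketch of such a config wired up on a client; the service name, target address, the experimental WithDefaultServiceConfig dial option, and the GRPC_GO_RETRY=on environment switch gating retries are assumptions for illustration, not values taken from this change.

package main

import (
	"log"

	"google.golang.org/grpc"
)

// Illustrative only: a service config exercising jsonRetryPolicy and
// retryThrottlingPolicy as parsed by parseServiceConfig above. JSON field
// matching is case-insensitive; durations use the proto3 JSON form ("0.1s").
const exampleServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }],
  "retryThrottling": {"maxTokens": 10, "tokenRatio": 0.5}
}`

func main() {
	// Assumptions: WithDefaultServiceConfig is the experimental dial option in
	// this grpc release, and retries are additionally gated behind the
	// GRPC_GO_RETRY=on environment variable.
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(exampleServiceConfig))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}

Per the validation above, a maxAttempts greater than 5 is capped by convertRetryPolicy, and a maxTokens outside (0, 1000] or a non-positive tokenRatio disables throttling entirely.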
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
index 05b384c..dc03731 100644
--- a/vendor/google.golang.org/grpc/stats/handlers.go
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -19,9 +19,8 @@
 package stats
 
 import (
+	"context"
 	"net"
-
-	"golang.org/x/net/context"
 )
 
 // ConnTagInfo defines the relevant information needed by connection context tagger.
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 3f13190..f3f593c 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -24,10 +24,11 @@
 package stats // import "google.golang.org/grpc/stats"
 
 import (
+	"context"
 	"net"
 	"time"
 
-	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
 )
 
 // RPCStats contains stats information about RPCs.
@@ -173,6 +174,9 @@
 	BeginTime time.Time
 	// EndTime is the time when the RPC ends.
 	EndTime time.Time
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this End is from the client side.
+	Trailer metadata.MD
 	// Error is the error the RPC ended with. It is an error generated from
 	// status.Status and can be converted back to status.Status using
 	// status.FromError if non-nil.
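The new Trailer field makes per-RPC trailer metadata visible to client-side stats handlers. A minimal sketch of a handler that consumes it, installed via grpc.WithStatsHandler; the handler type and log format are made up for illustration:

package example

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// trailerLogger is a minimal stats.Handler sketch that reads the End.Trailer
// field added above. It is illustrative and not part of this change.
type trailerLogger struct{}

func (trailerLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (trailerLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok && end.Client {
		// Trailer is only valid for client-side End events.
		log.Printf("rpc done in %v, err=%v, trailer=%v",
			end.EndTime.Sub(end.BeginTime), end.Error, end.Trailer)
	}
}

func (trailerLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (trailerLogger) HandleConn(context.Context, stats.ConnStats) {}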
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 9c61b09..ed36681 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -28,6 +28,7 @@
 package status
 
 import (
+	"context"
 	"errors"
 	"fmt"
 
@@ -126,7 +127,9 @@
 	if err == nil {
 		return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
 	}
-	if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
+	if se, ok := err.(interface {
+		GRPCStatus() *Status
+	}); ok {
 		return se.GRPCStatus(), true
 	}
 	return New(codes.Unknown, err.Error()), false
@@ -182,8 +185,26 @@
 	if err == nil {
 		return codes.OK
 	}
-	if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
+	if se, ok := err.(interface {
+		GRPCStatus() *Status
+	}); ok {
 		return se.GRPCStatus().Code()
 	}
 	return codes.Unknown
 }
+
+// FromContextError converts a context error into a Status.  It returns a
+// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
+// non-nil and not a context error.
+func FromContextError(err error) *Status {
+	switch err {
+	case nil:
+		return New(codes.OK, "")
+	case context.DeadlineExceeded:
+		return New(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return New(codes.Canceled, err.Error())
+	default:
+		return New(codes.Unknown, err.Error())
+	}
+}
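FromContextError is what the retry code in stream.go below uses to turn a canceled or expired context into the corresponding RPC status. The same helper is handy in application code; a small sketch follows, where the work function is hypothetical:

package example

import (
	"context"

	"google.golang.org/grpc/status"
)

// waitOrStatus runs work in the background and, if ctx ends first, converts
// the context error into a gRPC status: DeadlineExceeded, Canceled, or
// Unknown, exactly as FromContextError above documents.
func waitOrStatus(ctx context.Context, work func() error) error {
	done := make(chan error, 1)
	go func() { done <- work() }()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return status.FromContextError(ctx.Err()).Err()
	}
}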
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 82921a1..6e2bf51 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -19,21 +19,29 @@
 package grpc
 
 import (
+	"context"
 	"errors"
 	"io"
+	"math"
+	"strconv"
 	"sync"
 	"time"
 
-	"golang.org/x/net/context"
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/balancerload"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
 )
 
 // StreamHandler defines the handler called by gRPC server to complete the
@@ -55,31 +63,20 @@
 
 // Stream defines the common interface a client or server stream has to satisfy.
 //
-// All errors returned from Stream are compatible with the status package.
+// Deprecated: See ClientStream and ServerStream documentation instead.
 type Stream interface {
-	// Context returns the context for this stream.
+	// Deprecated: See ClientStream and ServerStream documentation instead.
 	Context() context.Context
-	// SendMsg blocks until it sends m, the stream is done or the stream
-	// breaks.
-	// On error, it aborts the stream and returns an RPC status on client
-	// side. On server side, it simply returns the error to the caller.
-	// SendMsg is called by generated code. Also Users can call SendMsg
-	// directly when it is really needed in their use cases.
-	// It's safe to have a goroutine calling SendMsg and another goroutine calling
-	// recvMsg on the same stream at the same time.
-	// But it is not safe to call SendMsg on the same stream in different goroutines.
+	// Deprecated: See ClientStream and ServerStream documentation instead.
 	SendMsg(m interface{}) error
-	// RecvMsg blocks until it receives a message or the stream is
-	// done. On client side, it returns io.EOF when the stream is done. On
-	// any other error, it aborts the stream and returns an RPC status. On
-	// server side, it simply returns the error to the caller.
-	// It's safe to have a goroutine calling SendMsg and another goroutine calling
-	// recvMsg on the same stream at the same time.
-	// But it is not safe to call RecvMsg on the same stream in different goroutines.
+	// Deprecated: See ClientStream and ServerStream documentation instead.
 	RecvMsg(m interface{}) error
 }
 
-// ClientStream defines the interface a client stream has to satisfy.
+// ClientStream defines the client-side behavior of a streaming RPC.
+//
+// All errors returned from ClientStream methods are compatible with the
+// status package.
 type ClientStream interface {
 	// Header returns the header metadata received from the server if there
 	// is any. It blocks if the metadata is not ready to read.
@@ -89,19 +86,60 @@
 	// stream.Recv has returned a non-nil error (including io.EOF).
 	Trailer() metadata.MD
 	// CloseSend closes the send direction of the stream. It closes the stream
-	// when non-nil error is met.
+	// when non-nil error is met. It is also not safe to call CloseSend
+	// concurrently with SendMsg.
 	CloseSend() error
-	// Stream.SendMsg() may return a non-nil error when something wrong happens sending
-	// the request. The returned error indicates the status of this sending, not the final
-	// status of the RPC.
+	// Context returns the context for this stream.
 	//
-	// Always call Stream.RecvMsg() to drain the stream and get the final
-	// status, otherwise there could be leaked resources.
-	Stream
+	// It should not be called until after Header or RecvMsg has returned. Once
+	// called, subsequent client-side retries are disabled.
+	Context() context.Context
+	// SendMsg is generally called by generated code. On error, SendMsg aborts
+	// the stream. If the error was generated by the client, the status is
+	// returned directly; otherwise, io.EOF is returned and the status of
+	// the stream may be discovered using RecvMsg.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the server. An
+	// untimely stream closure may result in lost messages. To ensure delivery,
+	// users should ensure the RPC completed successfully using RecvMsg.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines. It is also
+	// not safe to call CloseSend concurrently with SendMsg.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the stream completes successfully. On
+	// any other error, the stream is aborted and the error contains the RPC
+	// status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
 }
 
 // NewStream creates a new Stream for the client side. This is typically
-// called by generated code.
+// called by generated code. ctx is used for the lifetime of the stream.
+//
+// To ensure resources are not leaked due to the stream returned, one of the following
+// actions must be performed:
+//
+//      1. Call Close on the ClientConn.
+//      2. Cancel the context provided.
+//      3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+//         client-streaming RPC, for instance, might use the helper function
+//         CloseAndRecv (note that CloseSend does not Recv, therefore is not
+//         guaranteed to release all resources).
+//      4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+//
+// If none of the above happen, a goroutine and a context will be leaked, and grpc
+// will not call the optionally-configured stats handler with a stats.End message.
 func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
 	// allow interceptor to see all applicable call options, which means those
 	// configured as defaults from dial option as well as per-call options
@@ -113,10 +151,7 @@
 	return newClientStream(ctx, desc, cc, method, opts...)
 }
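A minimal raw-API sketch of rule 3 above: keep calling RecvMsg until it returns a non-nil error (io.EOF on success) so the stream's goroutine and context are released. The stream descriptor, method name, and message types are placeholders, and req/resp must be messages the configured codec understands; none of this is prescribed by the change itself.

package example

import (
	"context"
	"io"

	"google.golang.org/grpc"
)

// callAndDrain opens a bidirectional stream, sends one request, half-closes,
// and then drains responses until the stream reports its final status.
func callAndDrain(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
	desc := &grpc.StreamDesc{StreamName: "Example", ClientStreams: true, ServerStreams: true}
	cs, err := cc.NewStream(ctx, desc, "/example.Service/Example")
	if err != nil {
		return err
	}
	if err := cs.SendMsg(req); err != nil && err != io.EOF {
		// Rule 4: a non-nil, non-io.EOF error from SendMsg already released
		// the stream's resources; io.EOF means the status comes from RecvMsg.
		return err
	}
	if err := cs.CloseSend(); err != nil {
		return err
	}
	for {
		// Rule 3: drain until a non-nil error; io.EOF signals success.
		if err := cs.RecvMsg(resp); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
	}
}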
 
-// NewClientStream creates a new Stream for the client side. This is typically
-// called by generated code.
-//
-// DEPRECATED: Use ClientConn.NewStream instead.
+// NewClientStream is a wrapper for ClientConn.NewStream.
 func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
 	return cc.NewStream(ctx, desc, method, opts...)
 }
@@ -131,6 +166,11 @@
 		}()
 	}
 	c := defaultCallInfo()
+	// Provide an opportunity for the first RPC to see the first service config
+	// provided by the resolver.
+	if err := cc.waitForResolvedAddrs(ctx); err != nil {
+		return nil, err
+	}
 	mc := cc.GetMethodConfig(method)
 	if mc.WaitForReady != nil {
 		c.failFast = !*mc.WaitForReady
@@ -165,13 +205,8 @@
 	}
 
 	callHdr := &transport.CallHdr{
-		Host:   cc.authority,
-		Method: method,
-		// If it's not client streaming, we should already have the request to be sent,
-		// so we don't flush the header.
-		// If it's client streaming, the user may never send a request or send it any
-		// time soon, so we ask the transport to flush the header.
-		Flush:          desc.ClientStreams,
+		Host:           cc.authority,
+		Method:         method,
 		ContentSubtype: c.contentSubtype,
 	}
 
@@ -196,24 +231,19 @@
 	if c.creds != nil {
 		callHdr.Creds = c.creds
 	}
-	var trInfo traceInfo
+	var trInfo *traceInfo
 	if EnableTracing {
-		trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
-		trInfo.firstLine.client = true
+		trInfo = &traceInfo{
+			tr: trace.New("grpc.Sent."+methodFamily(method), method),
+			firstLine: firstLine{
+				client: true,
+			},
+		}
 		if deadline, ok := ctx.Deadline(); ok {
-			trInfo.firstLine.deadline = deadline.Sub(time.Now())
+			trInfo.firstLine.deadline = time.Until(deadline)
 		}
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 		ctx = trace.NewContext(ctx, trInfo.tr)
-		defer func() {
-			if err != nil {
-				// Need to call tr.finish() if error is returned.
-				// Because tr will not be returned to caller.
-				trInfo.tr.LazyPrintf("RPC: [%v]", err)
-				trInfo.tr.SetError()
-				trInfo.tr.Finish()
-			}
-		}()
 	}
 	ctx = newContextWithRPCInfo(ctx, c.failFast)
 	sh := cc.dopts.copts.StatsHandler
@@ -227,80 +257,59 @@
 			FailFast:  c.failFast,
 		}
 		sh.HandleRPC(ctx, begin)
-		defer func() {
-			if err != nil {
-				// Only handle end stats if err != nil.
-				end := &stats.End{
-					Client:    true,
-					Error:     err,
-					BeginTime: beginTime,
-					EndTime:   time.Now(),
-				}
-				sh.HandleRPC(ctx, end)
-			}
-		}()
-	}
-
-	var (
-		t    transport.ClientTransport
-		s    *transport.Stream
-		done func(balancer.DoneInfo)
-	)
-	for {
-		// Check to make sure the context has expired.  This will prevent us from
-		// looping forever if an error occurs for wait-for-ready RPCs where no data
-		// is sent on the wire.
-		select {
-		case <-ctx.Done():
-			return nil, toRPCErr(ctx.Err())
-		default:
-		}
-
-		t, done, err = cc.getTransport(ctx, c.failFast)
-		if err != nil {
-			return nil, err
-		}
-
-		s, err = t.NewStream(ctx, callHdr)
-		if err != nil {
-			if done != nil {
-				done(balancer.DoneInfo{Err: err})
-				done = nil
-			}
-			// In the event of any error from NewStream, we never attempted to write
-			// anything to the wire, so we can retry indefinitely for non-fail-fast
-			// RPCs.
-			if !c.failFast {
-				continue
-			}
-			return nil, toRPCErr(err)
-		}
-		break
 	}
 
 	cs := &clientStream{
-		opts:   opts,
-		c:      c,
-		cc:     cc,
-		desc:   desc,
-		codec:  c.codec,
-		cp:     cp,
-		comp:   comp,
-		cancel: cancel,
-		attempt: &csAttempt{
-			t:            t,
-			s:            s,
-			p:            &parser{r: s},
-			done:         done,
-			dc:           cc.dopts.dc,
-			ctx:          ctx,
-			trInfo:       trInfo,
-			statsHandler: sh,
-			beginTime:    beginTime,
-		},
+		callHdr:      callHdr,
+		ctx:          ctx,
+		methodConfig: &mc,
+		opts:         opts,
+		callInfo:     c,
+		cc:           cc,
+		desc:         desc,
+		codec:        c.codec,
+		cp:           cp,
+		comp:         comp,
+		cancel:       cancel,
+		beginTime:    beginTime,
+		firstAttempt: true,
 	}
-	cs.c.stream = cs
-	cs.attempt.cs = cs
+	if !cc.dopts.disableRetry {
+		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
+	}
+	cs.binlog = binarylog.GetMethodLogger(method)
+
+	cs.callInfo.stream = cs
+	// Only this initial attempt has stats/tracing.
+	// TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
+	if err := cs.newAttemptLocked(sh, trInfo); err != nil {
+		cs.finish(err)
+		return nil, err
+	}
+
+	op := func(a *csAttempt) error { return a.newStream() }
+	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+		cs.finish(err)
+		return nil, err
+	}
+
+	if cs.binlog != nil {
+		md, _ := metadata.FromOutgoingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			OnClientSide: true,
+			Header:       md,
+			MethodName:   method,
+			Authority:    cs.cc.authority,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		cs.binlog.Log(logEntry)
+	}
+
 	if desc != unaryStreamDesc {
 		// Listen on cc and stream contexts to cleanup when the user closes the
 		// ClientConn or cancels the stream context.  In all other cases, an error
@@ -319,12 +328,48 @@
 	return cs, nil
 }
 
+func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error {
+	cs.attempt = &csAttempt{
+		cs:           cs,
+		dc:           cs.cc.dopts.dc,
+		statsHandler: sh,
+		trInfo:       trInfo,
+	}
+
+	if err := cs.ctx.Err(); err != nil {
+		return toRPCErr(err)
+	}
+	t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+	if err != nil {
+		return err
+	}
+	if trInfo != nil {
+		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
+	}
+	cs.attempt.t = t
+	cs.attempt.done = done
+	return nil
+}
+
+func (a *csAttempt) newStream() error {
+	cs := a.cs
+	cs.callHdr.PreviousAttempts = cs.numRetries
+	s, err := a.t.NewStream(cs.ctx, cs.callHdr)
+	if err != nil {
+		return toRPCErr(err)
+	}
+	cs.attempt.s = s
+	cs.attempt.p = &parser{r: s}
+	return nil
+}
+
 // clientStream implements a client side Stream.
 type clientStream struct {
-	opts []CallOption
-	c    *callInfo
-	cc   *ClientConn
-	desc *StreamDesc
+	callHdr  *transport.CallHdr
+	opts     []CallOption
+	callInfo *callInfo
+	cc       *ClientConn
+	desc     *StreamDesc
 
 	codec baseCodec
 	cp    Compressor
@@ -332,13 +377,34 @@
 
 	cancel context.CancelFunc // cancels all attempts
 
-	sentLast bool // sent an end stream
+	sentLast  bool // sent an end stream
+	beginTime time.Time
 
-	mu       sync.Mutex // guards finished
-	finished bool       // TODO: replace with atomic cmpxchg or sync.Once?
+	methodConfig *MethodConfig
 
-	attempt *csAttempt // the active client stream attempt
+	ctx context.Context // the application's context, wrapped by stats/tracing
+
+	retryThrottler *retryThrottler // The throttler active when the RPC began.
+
+	binlog *binarylog.MethodLogger // Binary logger, can be nil.
+	// serverHeaderBinlogged records whether the server header has been logged.
+	// The server header is logged the first time one of the following happens:
+	// stream.Header(), stream.Recv().
+	//
+	// It's only read and used by Recv() and Header(), so it doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
+	mu                      sync.Mutex
+	firstAttempt            bool       // if true, transparent retry is valid
+	numRetries              int        // exclusive of transparent retry attempt(s)
+	numRetriesSincePushback int        // retries since pushback; to reset backoff
+	finished                bool       // TODO: replace with atomic cmpxchg or sync.Once?
+	attempt                 *csAttempt // the active client stream attempt
 	// TODO(hedging): hedging will have multiple attempts simultaneously.
+	committed  bool                       // active attempt committed for retry?
+	buffer     []func(a *csAttempt) error // operations to replay on retry
+	bufferSize int                        // current size of buffer
 }
 
 // csAttempt implements a single transport stream attempt within a
@@ -350,53 +416,360 @@
 	p    *parser
 	done func(balancer.DoneInfo)
 
+	finished  bool
 	dc        Decompressor
 	decomp    encoding.Compressor
 	decompSet bool
 
-	ctx context.Context // the application's context, wrapped by stats/tracing
-
 	mu sync.Mutex // guards trInfo.tr
+	// trInfo may be nil (if EnableTracing is false).
 	// trInfo.tr is set when created (if EnableTracing is true),
 	// and cleared when the finish method is called.
-	trInfo traceInfo
+	trInfo *traceInfo
 
 	statsHandler stats.Handler
-	beginTime    time.Time
+}
+
+func (cs *clientStream) commitAttemptLocked() {
+	cs.committed = true
+	cs.buffer = nil
+}
+
+func (cs *clientStream) commitAttempt() {
+	cs.mu.Lock()
+	cs.commitAttemptLocked()
+	cs.mu.Unlock()
+}
+
+// shouldRetry returns nil if the RPC should be retried; otherwise it returns
+// the error that should be returned by the operation.
+func (cs *clientStream) shouldRetry(err error) error {
+	if cs.attempt.s == nil && !cs.callInfo.failFast {
+		// In the event of any error from NewStream (attempt.s == nil), we
+		// never attempted to write anything to the wire, so we can retry
+		// indefinitely for non-fail-fast RPCs.
+		return nil
+	}
+	if cs.finished || cs.committed {
+		// RPC is finished or committed; cannot retry.
+		return err
+	}
+	// Wait for the trailers.
+	if cs.attempt.s != nil {
+		<-cs.attempt.s.Done()
+	}
+	if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+		// First attempt, wait-for-ready, stream unprocessed: transparently retry.
+		cs.firstAttempt = false
+		return nil
+	}
+	cs.firstAttempt = false
+	if cs.cc.dopts.disableRetry {
+		return err
+	}
+
+	pushback := 0
+	hasPushback := false
+	if cs.attempt.s != nil {
+		if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
+			return err
+		}
+
+		// TODO(retry): Move down if the spec changes to not check server pushback
+		// before considering this a failure for throttling.
+		sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
+		if len(sps) == 1 {
+			var e error
+			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
+				grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
+				cs.retryThrottler.throttle() // This counts as a failure for throttling.
+				return err
+			}
+			hasPushback = true
+		} else if len(sps) > 1 {
+			grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
+			cs.retryThrottler.throttle() // This counts as a failure for throttling.
+			return err
+		}
+	}
+
+	var code codes.Code
+	if cs.attempt.s != nil {
+		code = cs.attempt.s.Status().Code()
+	} else {
+		code = status.Convert(err).Code()
+	}
+
+	rp := cs.methodConfig.retryPolicy
+	if rp == nil || !rp.retryableStatusCodes[code] {
+		return err
+	}
+
+	// Note: the ordering here is important; we count this as a failure
+	// only if the code matched a retryable code.
+	if cs.retryThrottler.throttle() {
+		return err
+	}
+	if cs.numRetries+1 >= rp.maxAttempts {
+		return err
+	}
+
+	var dur time.Duration
+	if hasPushback {
+		dur = time.Millisecond * time.Duration(pushback)
+		cs.numRetriesSincePushback = 0
+	} else {
+		fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
+		cur := float64(rp.initialBackoff) * fact
+		if max := float64(rp.maxBackoff); cur > max {
+			cur = max
+		}
+		dur = time.Duration(grpcrand.Int63n(int64(cur)))
+		cs.numRetriesSincePushback++
+	}
+
+	// TODO(dfawley): we could eagerly fail here if dur puts us past the
+	// deadline, but unsure if it is worth doing.
+	t := time.NewTimer(dur)
+	select {
+	case <-t.C:
+		cs.numRetries++
+		return nil
+	case <-cs.ctx.Done():
+		t.Stop()
+		return status.FromContextError(cs.ctx.Err()).Err()
+	}
+}
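The delay computed above is exponential backoff with full jitter: the cap grows as initialBackoff·backoffMultiplier^n up to maxBackoff, and the actual sleep is drawn uniformly from [0, cap), unless the server supplied a pushback delay. A standalone sketch of the same computation, using math/rand in place of the internal grpcrand package:

package example

import (
	"math"
	"math/rand"
	"time"
)

// retryBackoff mirrors the delay computation in shouldRetry above. It assumes
// initial > 0, which convertRetryPolicy guarantees for accepted policies.
func retryBackoff(retriesSincePushback int, initial, maxBackoff time.Duration, multiplier float64, pushbackMS int, hasPushback bool) time.Duration {
	if hasPushback {
		// A server pushback overrides the computed backoff entirely.
		return time.Duration(pushbackMS) * time.Millisecond
	}
	limit := float64(initial) * math.Pow(multiplier, float64(retriesSincePushback))
	if m := float64(maxBackoff); limit > m {
		limit = m
	}
	// Full jitter: sleep a uniformly random duration below the limit.
	return time.Duration(rand.Int63n(int64(limit)))
}

For example, with an initialBackoff of 0.1s and a multiplier of 2, the third retry sleeps somewhere in [0, 0.4s), or below maxBackoff if that cap is lower.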
+
+// Returns nil if a retry was performed and succeeded; error otherwise.
+func (cs *clientStream) retryLocked(lastErr error) error {
+	for {
+		cs.attempt.finish(lastErr)
+		if err := cs.shouldRetry(lastErr); err != nil {
+			cs.commitAttemptLocked()
+			return err
+		}
+		if err := cs.newAttemptLocked(nil, nil); err != nil {
+			return err
+		}
+		if lastErr = cs.replayBufferLocked(); lastErr == nil {
+			return nil
+		}
+	}
 }
 
 func (cs *clientStream) Context() context.Context {
-	// TODO(retry): commit the current attempt (the context has peer-aware data).
-	return cs.attempt.context()
+	cs.commitAttempt()
+	// No need to lock before using attempt, since we know it is committed and
+	// cannot change.
+	return cs.attempt.s.Context()
+}
+
+func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
+	cs.mu.Lock()
+	for {
+		if cs.committed {
+			cs.mu.Unlock()
+			return op(cs.attempt)
+		}
+		a := cs.attempt
+		cs.mu.Unlock()
+		err := op(a)
+		cs.mu.Lock()
+		if a != cs.attempt {
+			// We started another attempt already.
+			continue
+		}
+		if err == io.EOF {
+			<-a.s.Done()
+		}
+		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+			onSuccess()
+			cs.mu.Unlock()
+			return err
+		}
+		if err := cs.retryLocked(err); err != nil {
+			cs.mu.Unlock()
+			return err
+		}
+	}
 }
 
 func (cs *clientStream) Header() (metadata.MD, error) {
-	m, err := cs.attempt.header()
+	var m metadata.MD
+	err := cs.withRetry(func(a *csAttempt) error {
+		var err error
+		m, err = a.s.Header()
+		return toRPCErr(err)
+	}, cs.commitAttemptLocked)
 	if err != nil {
-		// TODO(retry): maybe retry on error or commit attempt on success.
-		err = toRPCErr(err)
 		cs.finish(err)
+		return nil, err
+	}
+	if cs.binlog != nil && !cs.serverHeaderBinlogged {
+		// Only log if binary log is on and header has not been logged.
+		logEntry := &binarylog.ServerHeader{
+			OnClientSide: true,
+			Header:       m,
+			PeerAddr:     nil,
+		}
+		if peer, ok := peer.FromContext(cs.Context()); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		cs.binlog.Log(logEntry)
+		cs.serverHeaderBinlogged = true
 	}
 	return m, err
 }
 
 func (cs *clientStream) Trailer() metadata.MD {
-	// TODO(retry): on error, maybe retry (trailers-only).
-	return cs.attempt.trailer()
+	// On RPC failure, we never need to retry: valid usage requires that
+	// RecvMsg() has returned a non-nil error before Trailer is called, so any
+	// necessary retry has already happened.
+	//
+	// Commit the attempt anyway, just in case users are not following those
+	// directions -- it will prevent races and should not meaningfully impact
+	// performance.
+	cs.commitAttempt()
+	if cs.attempt.s == nil {
+		return nil
+	}
+	return cs.attempt.s.Trailer()
+}
+
+func (cs *clientStream) replayBufferLocked() error {
+	a := cs.attempt
+	for _, f := range cs.buffer {
+		if err := f(a); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+	// Note: we still will buffer if retry is disabled (for transparent retries).
+	if cs.committed {
+		return
+	}
+	cs.bufferSize += sz
+	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+		cs.commitAttemptLocked()
+		return
+	}
+	cs.buffer = append(cs.buffer, op)
 }
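Buffered operations are the core of the retry machinery: every wire operation is wrapped in a closure, replayed in order against a fresh attempt after a retryable failure, and dropped once the attempt is committed or the buffer outgrows maxRetryRPCBufferSize. A stripped-down sketch of that commit-or-buffer pattern, independent of the gRPC types above:

package example

// replayBuffer sketches the commit-or-buffer pattern used by clientStream:
// record operations until committed, then replay them on a new attempt.
type replayBuffer struct {
	committed bool
	maxBytes  int
	size      int
	ops       []func() error
}

// record runs op and, unless committed, keeps it for later replay. Growing
// past maxBytes commits instead, which disables further retries (this mirrors
// bufferForRetryLocked above).
func (b *replayBuffer) record(size int, op func() error) error {
	if !b.committed {
		b.size += size
		if b.size > b.maxBytes {
			b.committed = true
			b.ops = nil
		} else {
			b.ops = append(b.ops, op)
		}
	}
	return op()
}

// replay re-runs the recorded operations, e.g. against a new transport stream.
func (b *replayBuffer) replay() error {
	for _, op := range b.ops {
		if err := op(); err != nil {
			return err
		}
	}
	return nil
}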
 
 func (cs *clientStream) SendMsg(m interface{}) (err error) {
-	// TODO(retry): buffer message for replaying if not committed.
-	return cs.attempt.sendMsg(m)
+	defer func() {
+		if err != nil && err != io.EOF {
+			// Call finish on the client stream for errors generated by this SendMsg
+			// call, as these indicate problems created by this client.  (Transport
+			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+			// error will be returned from RecvMsg eventually in that case, or be
+			// retried.)
+			cs.finish(err)
+		}
+	}()
+	if cs.sentLast {
+		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+	}
+	if !cs.desc.ClientStreams {
+		cs.sentLast = true
+	}
+	data, err := encode(cs.codec, m)
+	if err != nil {
+		return err
+	}
+	compData, err := compress(data, cs.cp, cs.comp)
+	if err != nil {
+		return err
+	}
+	hdr, payload := msgHeader(data, compData)
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payload) > *cs.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+	}
+	msgBytes := data // Store the pointer before setting to nil. For binary logging.
+	op := func(a *csAttempt) error {
+		err := a.sendMsg(m, hdr, payload, data)
+		// nil out the message and uncomp when replaying; they are only needed for
+		// stats which is disabled for subsequent attempts.
+		m, data = nil, nil
+		return err
+	}
+	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
+	if cs.binlog != nil && err == nil {
+		cs.binlog.Log(&binarylog.ClientMessage{
+			OnClientSide: true,
+			Message:      msgBytes,
+		})
+	}
+	return
 }
 
-func (cs *clientStream) RecvMsg(m interface{}) (err error) {
-	// TODO(retry): maybe retry on error or commit attempt on success.
-	return cs.attempt.recvMsg(m)
+func (cs *clientStream) RecvMsg(m interface{}) error {
+	if cs.binlog != nil && !cs.serverHeaderBinlogged {
+		// Call Header() to binary log header if it's not already logged.
+		cs.Header()
+	}
+	var recvInfo *payloadInfo
+	if cs.binlog != nil {
+		recvInfo = &payloadInfo{}
+	}
+	err := cs.withRetry(func(a *csAttempt) error {
+		return a.recvMsg(m, recvInfo)
+	}, cs.commitAttemptLocked)
+	if cs.binlog != nil && err == nil {
+		cs.binlog.Log(&binarylog.ServerMessage{
+			OnClientSide: true,
+			Message:      recvInfo.uncompressedBytes,
+		})
+	}
+	if err != nil || !cs.desc.ServerStreams {
+		// err != nil or non-server-streaming indicates end of stream.
+		cs.finish(err)
+
+		if cs.binlog != nil {
+			// finish will not log Trailer. Log Trailer here.
+			logEntry := &binarylog.ServerTrailer{
+				OnClientSide: true,
+				Trailer:      cs.Trailer(),
+				Err:          err,
+			}
+			if logEntry.Err == io.EOF {
+				logEntry.Err = nil
+			}
+			if peer, ok := peer.FromContext(cs.Context()); ok {
+				logEntry.PeerAddr = peer.Addr
+			}
+			cs.binlog.Log(logEntry)
+		}
+	}
+	return err
 }
 
 func (cs *clientStream) CloseSend() error {
-	cs.attempt.closeSend()
+	if cs.sentLast {
+		// TODO: return an error and finish the stream instead, due to API misuse?
+		return nil
+	}
+	cs.sentLast = true
+	op := func(a *csAttempt) error {
+		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+		// Always return nil; io.EOF is the only error that might make sense
+		// instead, but there is no need to signal the client to call RecvMsg
+		// as the only use left for the stream after CloseSend is to call
+		// RecvMsg.  This also matches historical behavior.
+		return nil
+	}
+	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+	if cs.binlog != nil {
+		cs.binlog.Log(&binarylog.ClientHalfClose{
+			OnClientSide: true,
+		})
+	}
+	// We never return an error here; see the rationale in op above.
 	return nil
 }
 
@@ -411,7 +784,21 @@
 		return
 	}
 	cs.finished = true
+	cs.commitAttemptLocked()
 	cs.mu.Unlock()
+	// For binary logging, only log the cancel in finish (it may be caused by
+	// the RPC ctx being canceled or the ClientConn being closed). Trailer will
+	// be logged in RecvMsg.
+	//
+	// Only one of cancel or trailer needs to be logged. In the cases where
+	// users don't call RecvMsg, users must have already canceled the RPC.
+	if cs.binlog != nil && status.Code(err) == codes.Canceled {
+		cs.binlog.Log(&binarylog.Cancel{
+			OnClientSide: true,
+		})
+	}
+	if err == nil {
+		cs.retryThrottler.successfulRPC()
+	}
 	if channelz.IsOn() {
 		if err != nil {
 			cs.cc.incrCallsFailed()
@@ -419,97 +806,51 @@
 			cs.cc.incrCallsSucceeded()
 		}
 	}
-	// TODO(retry): commit current attempt if necessary.
-	cs.attempt.finish(err)
-	for _, o := range cs.opts {
-		o.after(cs.c)
+	if cs.attempt != nil {
+		cs.attempt.finish(err)
+	}
+	// after functions all rely upon having a stream.
+	if cs.attempt.s != nil {
+		for _, o := range cs.opts {
+			o.after(cs.callInfo)
+		}
 	}
 	cs.cancel()
 }
 
-func (a *csAttempt) context() context.Context {
-	return a.s.Context()
-}
-
-func (a *csAttempt) header() (metadata.MD, error) {
-	return a.s.Header()
-}
-
-func (a *csAttempt) trailer() metadata.MD {
-	return a.s.Trailer()
-}
-
-func (a *csAttempt) sendMsg(m interface{}) (err error) {
-	// TODO Investigate how to signal the stats handling party.
-	// generate error stats if err != nil && err != io.EOF?
+func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
 	cs := a.cs
-	defer func() {
-		// For non-client-streaming RPCs, we return nil instead of EOF on success
-		// because the generated code requires it.  finish is not called; RecvMsg()
-		// will call it with the stream's status independently.
-		if err == io.EOF && !cs.desc.ClientStreams {
-			err = nil
-		}
-		if err != nil && err != io.EOF {
-			// Call finish on the client stream for errors generated by this SendMsg
-			// call, as these indicate problems created by this client.  (Transport
-			// errors are converted to an io.EOF error below; the real error will be
-			// returned from RecvMsg eventually in that case, or be retried.)
-			cs.finish(err)
-		}
-	}()
-	// TODO: Check cs.sentLast and error if we already ended the stream.
-	if EnableTracing {
+	if a.trInfo != nil {
 		a.mu.Lock()
 		if a.trInfo.tr != nil {
 			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
 		}
 		a.mu.Unlock()
 	}
-	var outPayload *stats.OutPayload
+	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+		if !cs.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it.  finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
 	if a.statsHandler != nil {
-		outPayload = &stats.OutPayload{
-			Client: true,
-		}
+		a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
 	}
-	hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp)
-	if err != nil {
-		return err
+	if channelz.IsOn() {
+		a.t.IncrMsgSent()
 	}
-	if len(data) > *cs.c.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
-	}
-	if !cs.desc.ClientStreams {
-		cs.sentLast = true
-	}
-	err = a.t.Write(a.s, hdr, data, &transport.Options{Last: !cs.desc.ClientStreams})
-	if err == nil {
-		if outPayload != nil {
-			outPayload.SentTime = time.Now()
-			a.statsHandler.HandleRPC(a.ctx, outPayload)
-		}
-		if channelz.IsOn() {
-			a.t.IncrMsgSent()
-		}
-		return nil
-	}
-	return io.EOF
+	return nil
 }
 
-func (a *csAttempt) recvMsg(m interface{}) (err error) {
+func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
 	cs := a.cs
-	defer func() {
-		if err != nil || !cs.desc.ServerStreams {
-			// err != nil or non-server-streaming indicates end of stream.
-			cs.finish(err)
-		}
-	}()
-	var inPayload *stats.InPayload
-	if a.statsHandler != nil {
-		inPayload = &stats.InPayload{
-			Client: true,
-		}
+	if a.statsHandler != nil && payInfo == nil {
+		payInfo = &payloadInfo{}
 	}
+
 	if !a.decompSet {
 		// Block until we receive headers containing received message encoding.
 		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
@@ -526,7 +867,7 @@
 		// Only initialize this state once per stream.
 		a.decompSet = true
 	}
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp)
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
 	if err != nil {
 		if err == io.EOF {
 			if statusErr := a.s.Status().Err(); statusErr != nil {
@@ -536,15 +877,23 @@
 		}
 		return toRPCErr(err)
 	}
-	if EnableTracing {
+	if a.trInfo != nil {
 		a.mu.Lock()
 		if a.trInfo.tr != nil {
 			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
 		}
 		a.mu.Unlock()
 	}
-	if inPayload != nil {
-		a.statsHandler.HandleRPC(a.ctx, inPayload)
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
+			Client:   true,
+			RecvTime: time.Now(),
+			Payload:  m,
+			// TODO truncate large payload.
+			Data:       payInfo.uncompressedBytes,
+			WireLength: payInfo.wireLength,
+			Length:     len(payInfo.uncompressedBytes),
+		})
 	}
 	if channelz.IsOn() {
 		a.t.IncrMsgRecv()
@@ -553,10 +902,9 @@
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
 	}
-
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp)
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
 	if err == nil {
 		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
 	}
@@ -566,39 +914,47 @@
 	return toRPCErr(err)
 }
 
-func (a *csAttempt) closeSend() {
-	cs := a.cs
-	if cs.sentLast {
-		return
-	}
-	cs.sentLast = true
-	cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true})
-	// We ignore errors from Write.  Any error it would return would also be
-	// returned by a subsequent RecvMsg call, and the user is supposed to always
-	// finish the stream by calling RecvMsg until it returns err != nil.
-}
-
 func (a *csAttempt) finish(err error) {
 	a.mu.Lock()
-	a.t.CloseStream(a.s, err)
+	if a.finished {
+		a.mu.Unlock()
+		return
+	}
+	a.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	var tr metadata.MD
+	if a.s != nil {
+		a.t.CloseStream(a.s, err)
+		tr = a.s.Trailer()
+	}
 
 	if a.done != nil {
+		br := false
+		if a.s != nil {
+			br = a.s.BytesReceived()
+		}
 		a.done(balancer.DoneInfo{
 			Err:           err,
-			BytesSent:     true,
-			BytesReceived: a.s.BytesReceived(),
+			Trailer:       tr,
+			BytesSent:     a.s != nil,
+			BytesReceived: br,
+			ServerLoad:    balancerload.Parse(tr),
 		})
 	}
 	if a.statsHandler != nil {
 		end := &stats.End{
 			Client:    true,
-			BeginTime: a.beginTime,
+			BeginTime: a.cs.beginTime,
 			EndTime:   time.Now(),
+			Trailer:   tr,
 			Error:     err,
 		}
-		a.statsHandler.HandleRPC(a.ctx, end)
+		a.statsHandler.HandleRPC(a.cs.ctx, end)
 	}
-	if a.trInfo.tr != nil {
+	if a.trInfo != nil && a.trInfo.tr != nil {
 		if err == nil {
 			a.trInfo.tr.LazyPrintf("RPC: [OK]")
 		} else {
@@ -611,7 +967,302 @@
 	a.mu.Unlock()
 }
 
-// ServerStream defines the interface a server stream has to satisfy.
+func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) {
+	ac.mu.Lock()
+	if ac.transport != t {
+		ac.mu.Unlock()
+		return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
+	}
+	// transition to CONNECTING state when an attempt starts
+	if ac.state != connectivity.Connecting {
+		ac.updateConnectivityState(connectivity.Connecting)
+		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+	}
+	ac.mu.Unlock()
+
+	if t == nil {
+		// TODO: return RPC error here?
+		return nil, errors.New("transport provided is nil")
+	}
+	// defaultCallInfo contains unnecessary info (e.g. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+	c := &callInfo{}
+
+	for _, o := range opts {
+		if err := o.before(c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	if err := setCallInfoCodec(c); err != nil {
+		return nil, err
+	}
+
+	callHdr := &transport.CallHdr{
+		Host:           ac.cc.authority,
+		Method:         method,
+		ContentSubtype: c.contentSubtype,
+	}
+
+	// Set our outgoing compression according to the UseCompressor CallOption, if
+	// set.  In that case, also find the compressor from the encoding package.
+	// Otherwise, use the compressor configured by the WithCompressor DialOption,
+	// if set.
+	var cp Compressor
+	var comp encoding.Compressor
+	if ct := c.compressorType; ct != "" {
+		callHdr.SendCompress = ct
+		if ct != encoding.Identity {
+			comp = encoding.GetCompressor(ct)
+			if comp == nil {
+				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+			}
+		}
+	} else if ac.cc.dopts.cp != nil {
+		callHdr.SendCompress = ac.cc.dopts.cp.Type()
+		cp = ac.cc.dopts.cp
+	}
+	if c.creds != nil {
+		callHdr.Creds = c.creds
+	}
+
+	as := &addrConnStream{
+		callHdr:  callHdr,
+		ac:       ac,
+		ctx:      ctx,
+		cancel:   cancel,
+		opts:     opts,
+		callInfo: c,
+		desc:     desc,
+		codec:    c.codec,
+		cp:       cp,
+		comp:     comp,
+		t:        t,
+	}
+
+	as.callInfo.stream = as
+	s, err := as.t.NewStream(as.ctx, as.callHdr)
+	if err != nil {
+		err = toRPCErr(err)
+		return nil, err
+	}
+	as.s = s
+	as.p = &parser{r: s}
+	ac.incrCallsStarted()
+	if desc != unaryStreamDesc {
+		// Listen on cc and stream contexts to cleanup when the user closes the
+		// ClientConn or cancels the stream context.  In all other cases, an error
+		// should already be injected into the recv buffer by the transport, which
+		// the client will eventually receive, and then we will cancel the stream's
+		// context in clientStream.finish.
+		go func() {
+			select {
+			case <-ac.ctx.Done():
+				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
+			case <-ctx.Done():
+				as.finish(toRPCErr(ctx.Err()))
+			}
+		}()
+	}
+	return as, nil
+}
+
+type addrConnStream struct {
+	s         *transport.Stream
+	ac        *addrConn
+	callHdr   *transport.CallHdr
+	cancel    context.CancelFunc
+	opts      []CallOption
+	callInfo  *callInfo
+	t         transport.ClientTransport
+	ctx       context.Context
+	sentLast  bool
+	desc      *StreamDesc
+	codec     baseCodec
+	cp        Compressor
+	comp      encoding.Compressor
+	decompSet bool
+	dc        Decompressor
+	decomp    encoding.Compressor
+	p         *parser
+	mu        sync.Mutex
+	finished  bool
+}
+
+func (as *addrConnStream) Header() (metadata.MD, error) {
+	m, err := as.s.Header()
+	if err != nil {
+		as.finish(toRPCErr(err))
+	}
+	return m, err
+}
+
+func (as *addrConnStream) Trailer() metadata.MD {
+	return as.s.Trailer()
+}
+
+func (as *addrConnStream) CloseSend() error {
+	if as.sentLast {
+		// TODO: return an error and finish the stream instead, due to API misuse?
+		return nil
+	}
+	as.sentLast = true
+
+	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
+	// Always return nil; io.EOF is the only error that might make sense
+	// instead, but there is no need to signal the client to call RecvMsg
+	// as the only use left for the stream after CloseSend is to call
+	// RecvMsg.  This also matches historical behavior.
+	return nil
+}
+
+func (as *addrConnStream) Context() context.Context {
+	return as.s.Context()
+}
+
+func (as *addrConnStream) SendMsg(m interface{}) (err error) {
+	defer func() {
+		if err != nil && err != io.EOF {
+			// Call finish on the client stream for errors generated by this SendMsg
+			// call, as these indicate problems created by this client.  (Transport
+			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+			// error will be returned from RecvMsg eventually in that case, or be
+			// retried.)
+			as.finish(err)
+		}
+	}()
+	if as.sentLast {
+		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+	}
+	if !as.desc.ClientStreams {
+		as.sentLast = true
+	}
+	data, err := encode(as.codec, m)
+	if err != nil {
+		return err
+	}
+	compData, err := compress(data, as.cp, as.comp)
+	if err != nil {
+		return err
+	}
+	hdr, payld := msgHeader(data, compData)
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payld) > *as.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+	}
+
+	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+		if !as.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it.  finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+	defer func() {
+		if err != nil || !as.desc.ServerStreams {
+			// err != nil or non-server-streaming indicates end of stream.
+			as.finish(err)
+		}
+	}()
+
+	if !as.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if as.dc == nil || as.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				as.dc = nil
+				as.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			as.dc = nil
+		}
+		// Only initialize this state once per stream.
+		as.decompSet = true
+	}
+	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+	if err != nil {
+		if err == io.EOF {
+			if statusErr := as.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+		return toRPCErr(err)
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgRecv()
+	}
+	if as.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (as *addrConnStream) finish(err error) {
+	as.mu.Lock()
+	if as.finished {
+		as.mu.Unlock()
+		return
+	}
+	as.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	if as.s != nil {
+		as.t.CloseStream(as.s, err)
+	}
+
+	if err != nil {
+		as.ac.incrCallsFailed()
+	} else {
+		as.ac.incrCallsSucceeded()
+	}
+	as.cancel()
+	as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// All errors returned from ServerStream methods are compatible with the
+// status package.
 type ServerStream interface {
 	// SetHeader sets the header metadata. It may be called multiple times.
 	// When called multiple times, all the provided metadata will be merged.
@@ -627,7 +1278,32 @@
 	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
 	// When called more than once, all the provided metadata will be merged.
 	SetTrailer(metadata.MD)
-	Stream
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg sends a message. On error, SendMsg aborts the stream and the
+	// error is returned directly.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the client. An
+	// untimely stream closure may result in lost messages.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the client has performed a CloseSend. On
+	// any non-EOF error, the stream is aborted and the error contains the
+	// RPC status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
 }
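For completeness, a server-side sketch built directly on this contract: a raw grpc.StreamHandler that echoes whatever the client sends until RecvMsg reports io.EOF. It assumes the server was registered with a codec that round-trips raw byte slices (the default proto codec would not accept *[]byte), so treat it as an illustration of the RecvMsg/SendMsg semantics rather than drop-in code.

package example

import (
	"io"

	"google.golang.org/grpc"
)

// echoHandler echoes every received message back to the client. RecvMsg
// returns io.EOF once the client half-closes (CloseSend), which is the
// normal way for this loop to end with an OK status.
func echoHandler(srv interface{}, stream grpc.ServerStream) error {
	for {
		var msg []byte // assumes a raw-bytes codec; see the note above
		if err := stream.RecvMsg(&msg); err != nil {
			if err == io.EOF {
				return nil // client finished sending
			}
			return err
		}
		if err := stream.SendMsg(msg); err != nil {
			return err
		}
	}
}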
 
 // serverStream implements a server side Stream.
@@ -649,6 +1325,15 @@
 
 	statsHandler stats.Handler
 
+	binlog *binarylog.MethodLogger
+	// serverHeaderBinlogged indicates whether the server header has been
+	// logged. It is set the first time one of the following happens:
+	// stream.SendHeader(), stream.Send().
+	//
+	// It's only checked in send and sendHeader, doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
 	mu sync.Mutex // protects trInfo.tr after the service handler runs.
 }
 
@@ -664,7 +1349,15 @@
 }
 
 func (ss *serverStream) SendHeader(md metadata.MD) error {
-	return ss.t.WriteHeader(ss.s, md)
+	err := ss.t.WriteHeader(ss.s, md)
+	if ss.binlog != nil && !ss.serverHeaderBinlogged {
+		h, _ := ss.s.Header()
+		ss.binlog.Log(&binarylog.ServerHeader{
+			Header: h,
+		})
+		ss.serverHeaderBinlogged = true
+	}
+	return err
 }
 
 func (ss *serverStream) SetTrailer(md metadata.MD) {
@@ -691,28 +1384,47 @@
 		if err != nil && err != io.EOF {
 			st, _ := status.FromError(toRPCErr(err))
 			ss.t.WriteStatus(ss.s, st)
+			// A non-user-specified status was sent out; this should be an error
+			// case (e.g. a server-side cancel).
+			//
+			// This is not handled specifically for now. The user will return a
+			// final status from the service handler; we will log that error
+			// instead. This behavior is similar to an interceptor.
 		}
 		if channelz.IsOn() && err == nil {
 			ss.t.IncrMsgSent()
 		}
 	}()
-	var outPayload *stats.OutPayload
-	if ss.statsHandler != nil {
-		outPayload = &stats.OutPayload{}
-	}
-	hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp)
+	data, err := encode(ss.codec, m)
 	if err != nil {
 		return err
 	}
-	if len(data) > ss.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
+	compData, err := compress(data, ss.cp, ss.comp)
+	if err != nil {
+		return err
 	}
-	if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
+	hdr, payload := msgHeader(data, compData)
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payload) > ss.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+	}
+	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
 		return toRPCErr(err)
 	}
-	if outPayload != nil {
-		outPayload.SentTime = time.Now()
-		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload)
+	if ss.binlog != nil {
+		if !ss.serverHeaderBinlogged {
+			h, _ := ss.s.Header()
+			ss.binlog.Log(&binarylog.ServerHeader{
+				Header: h,
+			})
+			ss.serverHeaderBinlogged = true
+		}
+		ss.binlog.Log(&binarylog.ServerMessage{
+			Message: data,
+		})
+	}
+	if ss.statsHandler != nil {
+		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
 	}
 	return nil
 }
@@ -734,17 +1446,26 @@
 		if err != nil && err != io.EOF {
 			st, _ := status.FromError(toRPCErr(err))
 			ss.t.WriteStatus(ss.s, st)
+			// A non-user-specified status was sent out; this should be an error
+			// case (e.g. a server-side cancel).
+			//
+			// This is not handled specifically for now. The user will return a
+			// final status from the service handler; we will log that error
+			// instead. This behavior is similar to an interceptor.
 		}
 		if channelz.IsOn() && err == nil {
 			ss.t.IncrMsgRecv()
 		}
 	}()
-	var inPayload *stats.InPayload
-	if ss.statsHandler != nil {
-		inPayload = &stats.InPayload{}
+	var payInfo *payloadInfo
+	if ss.statsHandler != nil || ss.binlog != nil {
+		payInfo = &payloadInfo{}
 	}
-	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
 		if err == io.EOF {
+			if ss.binlog != nil {
+				ss.binlog.Log(&binarylog.ClientHalfClose{})
+			}
 			return err
 		}
 		if err == io.ErrUnexpectedEOF {
@@ -752,8 +1473,20 @@
 		}
 		return toRPCErr(err)
 	}
-	if inPayload != nil {
-		ss.statsHandler.HandleRPC(ss.s.Context(), inPayload)
+	if ss.statsHandler != nil {
+		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+			RecvTime: time.Now(),
+			Payload:  m,
+			// TODO truncate large payload.
+			Data:       payInfo.uncompressedBytes,
+			WireLength: payInfo.wireLength,
+			Length:     len(payInfo.uncompressedBytes),
+		})
+	}
+	if ss.binlog != nil {
+		ss.binlog.Log(&binarylog.ClientMessage{
+			Message: payInfo.uncompressedBytes,
+		})
 	}
 	return nil
 }
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index 22b8fb5..584360f 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -21,7 +21,7 @@
 package tap
 
 import (
-	"golang.org/x/net/context"
+	"context"
 )
 
 // Info defines the relevant information needed by the handles.
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
index c1c96de..0a57b99 100644
--- a/vendor/google.golang.org/grpc/trace.go
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -24,6 +24,7 @@
 	"io"
 	"net"
 	"strings"
+	"sync"
 	"time"
 
 	"golang.org/x/net/trace"
@@ -53,13 +54,25 @@
 }
 
 // firstLine is the first line of an RPC trace.
+// It may be mutated after construction; remoteAddr specifically may change
+// during client-side use.
 type firstLine struct {
+	mu         sync.Mutex
 	client     bool // whether this is a client (outgoing) RPC
 	remoteAddr net.Addr
 	deadline   time.Duration // may be zero
 }
 
+func (f *firstLine) SetRemoteAddr(addr net.Addr) {
+	f.mu.Lock()
+	f.remoteAddr = addr
+	f.mu.Unlock()
+}
+
 func (f *firstLine) String() string {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
 	var line bytes.Buffer
 	io.WriteString(&line, "RPC: ")
 	if f.client {
diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go
deleted file mode 100644
index 5babcf9..0000000
--- a/vendor/google.golang.org/grpc/transport/go16.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
-	"net"
-	"net/http"
-
-	"google.golang.org/grpc/codes"
-
-	"golang.org/x/net/context"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
-}
-
-// ContextErr converts the error from context package into a StreamError.
-func ContextErr(err error) StreamError {
-	switch err {
-	case context.DeadlineExceeded:
-		return streamErrorf(codes.DeadlineExceeded, "%v", err)
-	case context.Canceled:
-		return streamErrorf(codes.Canceled, "%v", err)
-	}
-	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
-}
-
-// contextFromRequest returns a background context.
-func contextFromRequest(r *http.Request) context.Context {
-	return context.Background()
-}
diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go
deleted file mode 100644
index b7fa6bd..0000000
--- a/vendor/google.golang.org/grpc/transport/go17.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
-	"context"
-	"net"
-	"net/http"
-
-	"google.golang.org/grpc/codes"
-
-	netctx "golang.org/x/net/context"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{}).DialContext(ctx, network, address)
-}
-
-// ContextErr converts the error from context package into a StreamError.
-func ContextErr(err error) StreamError {
-	switch err {
-	case context.DeadlineExceeded, netctx.DeadlineExceeded:
-		return streamErrorf(codes.DeadlineExceeded, "%v", err)
-	case context.Canceled, netctx.Canceled:
-		return streamErrorf(codes.Canceled, "%v", err)
-	}
-	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
-}
-
-// contextFromRequest returns a context from the HTTP Request.
-func contextFromRequest(r *http.Request) context.Context {
-	return r.Context()
-}
diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/version.go
similarity index 76%
rename from vendor/google.golang.org/grpc/naming/go18.go
rename to vendor/google.golang.org/grpc/version.go
index b5a0f84..092e088 100644
--- a/vendor/google.golang.org/grpc/naming/go18.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -1,8 +1,6 @@
-// +build go1.8
-
 /*
  *
- * Copyright 2017 gRPC authors.
+ * Copyright 2018 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,11 +16,7 @@
  *
  */
 
-package naming
+package grpc
 
-import "net"
-
-var (
-	lookupHost = net.DefaultResolver.LookupHost
-	lookupSRV  = net.DefaultResolver.LookupSRV
-)
+// Version is the current grpc version.
+const Version = "1.20.1"