| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/cloud/bigquery/storage/v1alpha2/storage.proto |
| |
| package storage |
| |
| import ( |
| context "context" |
| fmt "fmt" |
| math "math" |
| |
| proto "github.com/golang/protobuf/proto" |
| _ "github.com/golang/protobuf/ptypes/empty" |
| timestamp "github.com/golang/protobuf/ptypes/timestamp" |
| wrappers "github.com/golang/protobuf/ptypes/wrappers" |
| _ "google.golang.org/genproto/googleapis/api/annotations" |
| status "google.golang.org/genproto/googleapis/rpc/status" |
| grpc "google.golang.org/grpc" |
| codes "google.golang.org/grpc/codes" |
| status1 "google.golang.org/grpc/status" |
| ) |
| |
// Reference imports to suppress errors if they are not otherwise used.
// These keep the compiler happy even when the generated code below does not
// touch every imported package directly.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
| |
// Request message for `CreateWriteStream`.
type CreateWriteStreamRequest struct {
	// Required. Reference to the table to which the stream belongs, in the format
	// of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. Stream to be created.
	WriteStream *WriteStream `protobuf:"bytes,2,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *CreateWriteStreamRequest) Reset()         { *m = CreateWriteStreamRequest{} }
func (m *CreateWriteStreamRequest) String() string { return proto.CompactTextString(m) }
func (*CreateWriteStreamRequest) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index path
// within it.
func (*CreateWriteStreamRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{0}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *CreateWriteStreamRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateWriteStreamRequest.Unmarshal(m, b)
}
func (m *CreateWriteStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateWriteStreamRequest.Marshal(b, m, deterministic)
}
func (m *CreateWriteStreamRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateWriteStreamRequest.Merge(m, src)
}
func (m *CreateWriteStreamRequest) XXX_Size() int {
	return xxx_messageInfo_CreateWriteStreamRequest.Size(m)
}
func (m *CreateWriteStreamRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateWriteStreamRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CreateWriteStreamRequest proto.InternalMessageInfo

// GetParent returns Parent, or "" on a nil receiver (generated getters are
// nil-safe by convention).
func (m *CreateWriteStreamRequest) GetParent() string {
	if m != nil {
		return m.Parent
	}
	return ""
}

// GetWriteStream returns WriteStream, or nil on a nil receiver.
func (m *CreateWriteStreamRequest) GetWriteStream() *WriteStream {
	if m != nil {
		return m.WriteStream
	}
	return nil
}
| |
// Request message for `AppendRows`.
type AppendRowsRequest struct {
	// Required. The stream that is the target of the append operation. This value must be
	// specified for the initial request. If subsequent requests specify the
	// stream name, it must equal to the value provided in the first request.
	WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
	// Optional. If present, the write is only performed if the next append offset is same
	// as the provided value. If not present, the write is performed at the
	// current end of stream.
	Offset *wrappers.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
	// Input rows. The `writer_schema` field must be specified at the initial
	// request and currently, it will be ignored if specified in following
	// requests. Following requests must have data in the same format as the
	// initial request.
	//
	// Types that are valid to be assigned to Rows:
	// *AppendRowsRequest_ProtoRows
	Rows isAppendRowsRequest_Rows `protobuf_oneof:"rows"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *AppendRowsRequest) Reset()         { *m = AppendRowsRequest{} }
func (m *AppendRowsRequest) String() string { return proto.CompactTextString(m) }
func (*AppendRowsRequest) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index path
// within it.
func (*AppendRowsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{1}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *AppendRowsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AppendRowsRequest.Unmarshal(m, b)
}
func (m *AppendRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AppendRowsRequest.Marshal(b, m, deterministic)
}
func (m *AppendRowsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AppendRowsRequest.Merge(m, src)
}
func (m *AppendRowsRequest) XXX_Size() int {
	return xxx_messageInfo_AppendRowsRequest.Size(m)
}
func (m *AppendRowsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AppendRowsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AppendRowsRequest proto.InternalMessageInfo

// GetWriteStream returns WriteStream, or "" on a nil receiver.
func (m *AppendRowsRequest) GetWriteStream() string {
	if m != nil {
		return m.WriteStream
	}
	return ""
}

// GetOffset returns Offset, or nil on a nil receiver.
func (m *AppendRowsRequest) GetOffset() *wrappers.Int64Value {
	if m != nil {
		return m.Offset
	}
	return nil
}

// isAppendRowsRequest_Rows is the marker interface implemented by every valid
// wrapper for the `rows` oneof.
type isAppendRowsRequest_Rows interface {
	isAppendRowsRequest_Rows()
}

// AppendRowsRequest_ProtoRows wraps the `proto_rows` oneof case.
type AppendRowsRequest_ProtoRows struct {
	ProtoRows *AppendRowsRequest_ProtoData `protobuf:"bytes,4,opt,name=proto_rows,json=protoRows,proto3,oneof"`
}

func (*AppendRowsRequest_ProtoRows) isAppendRowsRequest_Rows() {}

// GetRows returns the raw oneof value, or nil on a nil receiver.
func (m *AppendRowsRequest) GetRows() isAppendRowsRequest_Rows {
	if m != nil {
		return m.Rows
	}
	return nil
}

// GetProtoRows returns the `proto_rows` case of the oneof, or nil if a
// different case (or none) is set.
func (m *AppendRowsRequest) GetProtoRows() *AppendRowsRequest_ProtoData {
	if x, ok := m.GetRows().(*AppendRowsRequest_ProtoRows); ok {
		return x.ProtoRows
	}
	return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*AppendRowsRequest) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*AppendRowsRequest_ProtoRows)(nil),
	}
}
| |
// AppendRowsRequest_ProtoData carries serialized proto rows plus the schema
// used to encode them (the `ProtoData` nested message of AppendRowsRequest).
type AppendRowsRequest_ProtoData struct {
	// Proto schema used to serialize the data.
	WriterSchema *ProtoSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"`
	// Serialized row data in protobuf message format.
	Rows *ProtoRows `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *AppendRowsRequest_ProtoData) Reset()         { *m = AppendRowsRequest_ProtoData{} }
func (m *AppendRowsRequest_ProtoData) String() string { return proto.CompactTextString(m) }
func (*AppendRowsRequest_ProtoData) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this nested message's
// index path ({1, 0} = first nested message of message 1).
func (*AppendRowsRequest_ProtoData) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{1, 0}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *AppendRowsRequest_ProtoData) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AppendRowsRequest_ProtoData.Unmarshal(m, b)
}
func (m *AppendRowsRequest_ProtoData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AppendRowsRequest_ProtoData.Marshal(b, m, deterministic)
}
func (m *AppendRowsRequest_ProtoData) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AppendRowsRequest_ProtoData.Merge(m, src)
}
func (m *AppendRowsRequest_ProtoData) XXX_Size() int {
	return xxx_messageInfo_AppendRowsRequest_ProtoData.Size(m)
}
func (m *AppendRowsRequest_ProtoData) XXX_DiscardUnknown() {
	xxx_messageInfo_AppendRowsRequest_ProtoData.DiscardUnknown(m)
}

var xxx_messageInfo_AppendRowsRequest_ProtoData proto.InternalMessageInfo

// GetWriterSchema returns WriterSchema, or nil on a nil receiver.
func (m *AppendRowsRequest_ProtoData) GetWriterSchema() *ProtoSchema {
	if m != nil {
		return m.WriterSchema
	}
	return nil
}

// GetRows returns Rows, or nil on a nil receiver.
func (m *AppendRowsRequest_ProtoData) GetRows() *ProtoRows {
	if m != nil {
		return m.Rows
	}
	return nil
}
| |
// Response message for `AppendRows`.
type AppendRowsResponse struct {
	// Exactly one of the following is set per response:
	//
	// Types that are valid to be assigned to Response:
	// *AppendRowsResponse_Offset
	// *AppendRowsResponse_Error
	Response isAppendRowsResponse_Response `protobuf_oneof:"response"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *AppendRowsResponse) Reset()         { *m = AppendRowsResponse{} }
func (m *AppendRowsResponse) String() string { return proto.CompactTextString(m) }
func (*AppendRowsResponse) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index path
// within it.
func (*AppendRowsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{2}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *AppendRowsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AppendRowsResponse.Unmarshal(m, b)
}
func (m *AppendRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AppendRowsResponse.Marshal(b, m, deterministic)
}
func (m *AppendRowsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AppendRowsResponse.Merge(m, src)
}
func (m *AppendRowsResponse) XXX_Size() int {
	return xxx_messageInfo_AppendRowsResponse.Size(m)
}
func (m *AppendRowsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AppendRowsResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AppendRowsResponse proto.InternalMessageInfo

// isAppendRowsResponse_Response is the marker interface implemented by every
// valid wrapper for the `response` oneof.
type isAppendRowsResponse_Response interface {
	isAppendRowsResponse_Response()
}

// AppendRowsResponse_Offset wraps the `offset` oneof case.
type AppendRowsResponse_Offset struct {
	Offset int64 `protobuf:"varint,1,opt,name=offset,proto3,oneof"`
}

// AppendRowsResponse_Error wraps the `error` oneof case.
type AppendRowsResponse_Error struct {
	Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
}

func (*AppendRowsResponse_Offset) isAppendRowsResponse_Response() {}

func (*AppendRowsResponse_Error) isAppendRowsResponse_Response() {}

// GetResponse returns the raw oneof value, or nil on a nil receiver.
func (m *AppendRowsResponse) GetResponse() isAppendRowsResponse_Response {
	if m != nil {
		return m.Response
	}
	return nil
}

// GetOffset returns the `offset` case of the oneof, or 0 if a different case
// (or none) is set.
func (m *AppendRowsResponse) GetOffset() int64 {
	if x, ok := m.GetResponse().(*AppendRowsResponse_Offset); ok {
		return x.Offset
	}
	return 0
}

// GetError returns the `error` case of the oneof, or nil if a different case
// (or none) is set.
func (m *AppendRowsResponse) GetError() *status.Status {
	if x, ok := m.GetResponse().(*AppendRowsResponse_Error); ok {
		return x.Error
	}
	return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*AppendRowsResponse) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*AppendRowsResponse_Offset)(nil),
		(*AppendRowsResponse_Error)(nil),
	}
}
| |
// Request message for `GetWriteStreamRequest`.
type GetWriteStreamRequest struct {
	// Required. Name of the stream to get, in the form of
	// `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *GetWriteStreamRequest) Reset()         { *m = GetWriteStreamRequest{} }
func (m *GetWriteStreamRequest) String() string { return proto.CompactTextString(m) }
func (*GetWriteStreamRequest) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index path
// within it.
func (*GetWriteStreamRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{3}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *GetWriteStreamRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetWriteStreamRequest.Unmarshal(m, b)
}
func (m *GetWriteStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetWriteStreamRequest.Marshal(b, m, deterministic)
}
func (m *GetWriteStreamRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetWriteStreamRequest.Merge(m, src)
}
func (m *GetWriteStreamRequest) XXX_Size() int {
	return xxx_messageInfo_GetWriteStreamRequest.Size(m)
}
func (m *GetWriteStreamRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetWriteStreamRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetWriteStreamRequest proto.InternalMessageInfo

// GetName returns Name, or "" on a nil receiver.
func (m *GetWriteStreamRequest) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}
| |
// Request message for `BatchCommitWriteStreams`.
type BatchCommitWriteStreamsRequest struct {
	// Required. Parent table that all the streams should belong to, in the form of
	// `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The group of streams that will be committed atomically.
	WriteStreams []string `protobuf:"bytes,2,rep,name=write_streams,json=writeStreams,proto3" json:"write_streams,omitempty"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *BatchCommitWriteStreamsRequest) Reset()         { *m = BatchCommitWriteStreamsRequest{} }
func (m *BatchCommitWriteStreamsRequest) String() string { return proto.CompactTextString(m) }
func (*BatchCommitWriteStreamsRequest) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index path
// within it.
func (*BatchCommitWriteStreamsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{4}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *BatchCommitWriteStreamsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BatchCommitWriteStreamsRequest.Unmarshal(m, b)
}
func (m *BatchCommitWriteStreamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BatchCommitWriteStreamsRequest.Marshal(b, m, deterministic)
}
func (m *BatchCommitWriteStreamsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchCommitWriteStreamsRequest.Merge(m, src)
}
func (m *BatchCommitWriteStreamsRequest) XXX_Size() int {
	return xxx_messageInfo_BatchCommitWriteStreamsRequest.Size(m)
}
func (m *BatchCommitWriteStreamsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchCommitWriteStreamsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_BatchCommitWriteStreamsRequest proto.InternalMessageInfo

// GetParent returns Parent, or "" on a nil receiver.
func (m *BatchCommitWriteStreamsRequest) GetParent() string {
	if m != nil {
		return m.Parent
	}
	return ""
}

// GetWriteStreams returns WriteStreams, or nil on a nil receiver.
func (m *BatchCommitWriteStreamsRequest) GetWriteStreams() []string {
	if m != nil {
		return m.WriteStreams
	}
	return nil
}
| |
// Response message for `BatchCommitWriteStreams`.
type BatchCommitWriteStreamsResponse struct {
	// The time at which streams were committed in microseconds granularity.
	CommitTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *BatchCommitWriteStreamsResponse) Reset()         { *m = BatchCommitWriteStreamsResponse{} }
func (m *BatchCommitWriteStreamsResponse) String() string { return proto.CompactTextString(m) }
func (*BatchCommitWriteStreamsResponse) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index path
// within it.
func (*BatchCommitWriteStreamsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{5}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *BatchCommitWriteStreamsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BatchCommitWriteStreamsResponse.Unmarshal(m, b)
}
func (m *BatchCommitWriteStreamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BatchCommitWriteStreamsResponse.Marshal(b, m, deterministic)
}
func (m *BatchCommitWriteStreamsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchCommitWriteStreamsResponse.Merge(m, src)
}
func (m *BatchCommitWriteStreamsResponse) XXX_Size() int {
	return xxx_messageInfo_BatchCommitWriteStreamsResponse.Size(m)
}
func (m *BatchCommitWriteStreamsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchCommitWriteStreamsResponse.DiscardUnknown(m)
}

var xxx_messageInfo_BatchCommitWriteStreamsResponse proto.InternalMessageInfo

// GetCommitTime returns CommitTime, or nil on a nil receiver.
func (m *BatchCommitWriteStreamsResponse) GetCommitTime() *timestamp.Timestamp {
	if m != nil {
		return m.CommitTime
	}
	return nil
}
| |
// Request message for invoking `FinalizeWriteStream`.
type FinalizeWriteStreamRequest struct {
	// Required. Name of the stream to finalize, in the form of
	// `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *FinalizeWriteStreamRequest) Reset()         { *m = FinalizeWriteStreamRequest{} }
func (m *FinalizeWriteStreamRequest) String() string { return proto.CompactTextString(m) }
func (*FinalizeWriteStreamRequest) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index path
// within it.
func (*FinalizeWriteStreamRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{6}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *FinalizeWriteStreamRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_FinalizeWriteStreamRequest.Unmarshal(m, b)
}
func (m *FinalizeWriteStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_FinalizeWriteStreamRequest.Marshal(b, m, deterministic)
}
func (m *FinalizeWriteStreamRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FinalizeWriteStreamRequest.Merge(m, src)
}
func (m *FinalizeWriteStreamRequest) XXX_Size() int {
	return xxx_messageInfo_FinalizeWriteStreamRequest.Size(m)
}
func (m *FinalizeWriteStreamRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_FinalizeWriteStreamRequest.DiscardUnknown(m)
}

var xxx_messageInfo_FinalizeWriteStreamRequest proto.InternalMessageInfo

// GetName returns Name, or "" on a nil receiver.
func (m *FinalizeWriteStreamRequest) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}
| |
// Response message for `FinalizeWriteStream`.
type FinalizeWriteStreamResponse struct {
	// Number of rows in the finalized stream.
	RowCount int64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// Internal fields used by the proto runtime; do not set directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *FinalizeWriteStreamResponse) Reset()         { *m = FinalizeWriteStreamResponse{} }
func (m *FinalizeWriteStreamResponse) String() string { return proto.CompactTextString(m) }
func (*FinalizeWriteStreamResponse) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index path
// within it.
func (*FinalizeWriteStreamResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_f4f9d0517c05d712, []int{7}
}

// XXX_* methods delegate to the shared message info; they are for the internal
// use of the proto package.
func (m *FinalizeWriteStreamResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_FinalizeWriteStreamResponse.Unmarshal(m, b)
}
func (m *FinalizeWriteStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_FinalizeWriteStreamResponse.Marshal(b, m, deterministic)
}
func (m *FinalizeWriteStreamResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FinalizeWriteStreamResponse.Merge(m, src)
}
func (m *FinalizeWriteStreamResponse) XXX_Size() int {
	return xxx_messageInfo_FinalizeWriteStreamResponse.Size(m)
}
func (m *FinalizeWriteStreamResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_FinalizeWriteStreamResponse.DiscardUnknown(m)
}

var xxx_messageInfo_FinalizeWriteStreamResponse proto.InternalMessageInfo

// GetRowCount returns RowCount, or 0 on a nil receiver.
func (m *FinalizeWriteStreamResponse) GetRowCount() int64 {
	if m != nil {
		return m.RowCount
	}
	return 0
}
| |
// init registers every message type in this file with the proto type registry
// under its fully-qualified proto name, enabling lookup by name (e.g. for Any
// unpacking and reflection).
func init() {
	proto.RegisterType((*CreateWriteStreamRequest)(nil), "google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest")
	proto.RegisterType((*AppendRowsRequest)(nil), "google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest")
	proto.RegisterType((*AppendRowsRequest_ProtoData)(nil), "google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData")
	proto.RegisterType((*AppendRowsResponse)(nil), "google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse")
	proto.RegisterType((*GetWriteStreamRequest)(nil), "google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest")
	proto.RegisterType((*BatchCommitWriteStreamsRequest)(nil), "google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest")
	proto.RegisterType((*BatchCommitWriteStreamsResponse)(nil), "google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse")
	proto.RegisterType((*FinalizeWriteStreamRequest)(nil), "google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest")
	proto.RegisterType((*FinalizeWriteStreamResponse)(nil), "google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse")
}
| |
// init registers the compressed file descriptor under the source .proto path.
func init() {
	proto.RegisterFile("google/cloud/bigquery/storage/v1alpha2/storage.proto", fileDescriptor_f4f9d0517c05d712)
}
| |
// fileDescriptor_f4f9d0517c05d712 is the gzip-compressed FileDescriptorProto
// for storage.proto; the Descriptor() methods above index into it. Do not edit.
var fileDescriptor_f4f9d0517c05d712 = []byte{
	// 886 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
	0x14, 0xcf, 0xc4, 0x21, 0x6a, 0x5e, 0x5a, 0xa4, 0x0e, 0x42, 0xb1, 0xd6, 0x28, 0xad, 0xf6, 0x50,
	0x45, 0x16, 0xec, 0x36, 0x49, 0x5b, 0x91, 0x94, 0x22, 0x6c, 0x97, 0x34, 0x48, 0x20, 0xc1, 0x06,
	0x01, 0x02, 0x09, 0x6b, 0xbc, 0x1e, 0xaf, 0x17, 0xed, 0xee, 0x4c, 0x67, 0x66, 0xbb, 0x2a, 0x15,
	0x07, 0xb8, 0x72, 0x03, 0x3e, 0x06, 0x07, 0x3e, 0x06, 0xea, 0x11, 0xc4, 0x9d, 0x43, 0xf9, 0x1e,
	0x68, 0x67, 0x67, 0xed, 0xc5, 0xb1, 0xc9, 0xda, 0xe9, 0x2d, 0x9e, 0xf7, 0x7e, 0xef, 0xfd, 0x7e,
	0xef, 0x5f, 0x16, 0xee, 0x04, 0x8c, 0x05, 0x11, 0x75, 0xfd, 0x88, 0xa5, 0x43, 0x77, 0x10, 0x06,
	0x8f, 0x53, 0x2a, 0x9e, 0xba, 0x52, 0x31, 0x41, 0x02, 0xea, 0x3e, 0xd9, 0x27, 0x11, 0x1f, 0x93,
	0x83, 0xf2, 0xc1, 0xe1, 0x82, 0x29, 0x86, 0x6f, 0x15, 0x28, 0x47, 0xa3, 0x9c, 0x12, 0xe5, 0x94,
	0x4e, 0x25, 0xca, 0x7a, 0xc3, 0x44, 0x27, 0x3c, 0x74, 0x49, 0x92, 0x30, 0x45, 0x54, 0xc8, 0x12,
	0x59, 0x44, 0xb1, 0x76, 0x2a, 0x56, 0x3f, 0x0a, 0x69, 0xa2, 0x8c, 0xe1, 0x46, 0xc5, 0x30, 0x0a,
	0x69, 0x34, 0xec, 0x0f, 0xe8, 0x98, 0x3c, 0x09, 0x99, 0x30, 0x0e, 0x77, 0x6b, 0xb2, 0xd6, 0xde,
	0x83, 0x74, 0x64, 0x60, 0x87, 0xb5, 0xc5, 0x0a, 0x4a, 0x62, 0x03, 0x6a, 0x19, 0x50, 0x19, 0xcb,
	0xa5, 0x31, 0x57, 0x4f, 0x67, 0x98, 0x4e, 0x8c, 0x2a, 0x8c, 0xa9, 0x54, 0x24, 0xe6, 0xc6, 0x61,
	0x77, 0xd6, 0x21, 0x13, 0x84, 0x73, 0x2a, 0x66, 0x6b, 0x20, 0xb8, 0xef, 0x4a, 0x45, 0x54, 0x6a,
	0x0c, 0xf6, 0x2f, 0x08, 0x9a, 0x3d, 0x41, 0x89, 0xa2, 0x9f, 0x8b, 0x50, 0xd1, 0x33, 0x4d, 0xc9,
	0xa3, 0x8f, 0x53, 0x2a, 0x15, 0x6e, 0xc1, 0x26, 0x27, 0x82, 0x26, 0xaa, 0x89, 0x6e, 0xa2, 0xbd,
	0xad, 0x6e, 0xe3, 0xef, 0xce, 0xba, 0x67, 0x9e, 0xf0, 0x57, 0x70, 0x35, 0xcb, 0x21, 0xfd, 0x42,
	0x46, 0x73, 0xfd, 0x26, 0xda, 0xdb, 0x3e, 0x38, 0x74, 0xea, 0xf5, 0xcc, 0xa9, 0xa4, 0x2b, 0xe2,
	0x6e, 0x67, 0xd3, 0x17, 0xfb, 0xa7, 0x06, 0x5c, 0xef, 0x70, 0x4e, 0x93, 0xa1, 0xc7, 0x32, 0x59,
	0xf2, 0xb9, 0x35, 0x93, 0xb2, 0xc2, 0xaa, 0x8a, 0xc6, 0x6f, 0xc3, 0x26, 0x1b, 0x8d, 0x24, 0x55,
	0x86, 0x54, 0xab, 0x24, 0x35, 0x69, 0xd4, 0x07, 0x89, 0xba, 0x77, 0xe7, 0x33, 0x12, 0xa5, 0x34,
	0x87, 0x23, 0xcf, 0xf8, 0xe3, 0x21, 0x80, 0xf6, 0xe9, 0x0b, 0x96, 0xc9, 0xe6, 0x86, 0x46, 0xf7,
	0xea, 0x4a, 0x3a, 0x47, 0xd8, 0xf9, 0x38, 0x8f, 0xf5, 0x90, 0x28, 0x72, 0xba, 0xe6, 0x6d, 0xe9,
	0xc0, 0xb9, 0xd5, 0xfa, 0x15, 0xc1, 0xd6, 0xc4, 0x84, 0xbf, 0x80, 0x6b, 0x9a, 0xbc, 0xe8, 0x4b,
	0x7f, 0x4c, 0x63, 0xa2, 0x65, 0x2d, 0x51, 0x49, 0x1d, 0xe9, 0x4c, 0x43, 0xbd, 0xa2, 0x3e, 0xa2,
	0xf8, 0x85, 0xdf, 0x87, 0x0d, 0xad, 0xa3, 0xa8, 0xc2, 0xfe, 0x52, 0x01, 0xb5, 0x0c, 0x0d, 0xef,
	0x6e, 0x16, 0x61, 0xec, 0x11, 0xe0, 0xaa, 0x44, 0xc9, 0x59, 0x22, 0x29, 0x6e, 0x4e, 0x8a, 0x9d,
	0xf3, 0x6e, 0x9c, 0xae, 0x4d, 0x8a, 0xd9, 0x86, 0x57, 0xa8, 0x10, 0x4c, 0x98, 0xfc, 0xb8, 0xcc,
	0x2f, 0xb8, 0xef, 0x9c, 0xe9, 0x21, 0x3c, 0x5d, 0xf3, 0x0a, 0x97, 0x2e, 0xc0, 0x15, 0x61, 0x22,
	0xda, 0xb7, 0xe1, 0xf5, 0x47, 0x54, 0xcd, 0x99, 0xc7, 0x1d, 0xd8, 0x48, 0x48, 0x4c, 0xab, 0x7d,
	0xd7, 0x0f, 0x76, 0x00, 0xbb, 0x5d, 0xa2, 0xfc, 0x71, 0x8f, 0xc5, 0x71, 0x58, 0x45, 0xca, 0x5a,
	0xa3, 0xbc, 0x67, 0x3a, 0x60, 0xe6, 0x2a, 0x2f, 0x58, 0xa3, 0xf4, 0xb9, 0x5a, 0x19, 0x2c, 0x69,
	0x7f, 0x0d, 0x37, 0x16, 0x26, 0x32, 0xf5, 0xb8, 0x0f, 0xdb, 0xbe, 0xb6, 0xf6, 0xf3, 0x25, 0x35,
	0xcd, 0xb4, 0xce, 0x4d, 0xe0, 0xa7, 0xe5, 0x06, 0x7b, 0x50, 0xb8, 0xe7, 0x0f, 0xf6, 0x5d, 0xb0,
	0x4e, 0xc2, 0x84, 0x44, 0xe1, 0xb7, 0x74, 0x19, 0xfd, 0xc7, 0xd0, 0x9a, 0x0b, 0x33, 0x94, 0x5a,
	0xb0, 0x25, 0x58, 0xd6, 0xf7, 0x59, 0x6a, 0xf4, 0x37, 0xbc, 0x2b, 0x82, 0x65, 0xbd, 0xfc, 0xf7,
	0xc1, 0xf7, 0x00, 0xd7, 0xba, 0x61, 0xf0, 0x49, 0x3e, 0x0b, 0x1a, 0x8c, 0x7f, 0x47, 0x70, 0xfd,
	0xdc, 0x4d, 0xc0, 0xef, 0xd5, 0x1d, 0x9f, 0x45, 0xe7, 0xc4, 0x5a, 0xe5, 0x36, 0xd8, 0xef, 0xfc,
	0xf0, 0xe7, 0x8b, 0x9f, 0xd7, 0xef, 0xd9, 0xfb, 0xd3, 0xbb, 0xf9, 0xac, 0x68, 0xdb, 0x03, 0x2e,
	0xd8, 0x37, 0xd4, 0x57, 0xd2, 0x6d, 0xbb, 0x43, 0xa2, 0x88, 0xa4, 0xfa, 0x4f, 0x45, 0x06, 0x11,
	0x95, 0x6e, 0xfb, 0xbb, 0x63, 0xd4, 0xc6, 0x7f, 0x21, 0x80, 0xe9, 0xcc, 0xe2, 0xa3, 0x95, 0x57,
	0xd9, 0x3a, 0x5e, 0x05, 0x6a, 0x06, 0xfa, 0x43, 0xad, 0xe1, 0xc4, 0xee, 0x54, 0x34, 0x54, 0x07,
	0xee, 0x02, 0x25, 0xe6, 0x1f, 0x85, 0xd1, 0xb4, 0x87, 0x6e, 0x23, 0xfc, 0x1c, 0xc1, 0xab, 0xff,
	0xdd, 0x11, 0xfc, 0xa0, 0x2e, 0xc1, 0xb9, 0xbb, 0xb5, 0x5a, 0x73, 0x1e, 0x6a, 0x61, 0xef, 0xda,
	0x47, 0x15, 0x61, 0xf9, 0x40, 0x2e, 0x25, 0x08, 0xbf, 0x40, 0xf0, 0xda, 0x9c, 0xf1, 0xc5, 0xdd,
	0xba, 0x94, 0x16, 0xaf, 0x8c, 0xd5, 0xbb, 0x54, 0x0c, 0xd3, 0xbf, 0x97, 0x23, 0xf3, 0x1f, 0x04,
	0x3b, 0x0b, 0x8e, 0x07, 0x3e, 0xa9, 0x4b, 0xf3, 0xff, 0xcf, 0x9c, 0xf5, 0xe8, 0xd2, 0x71, 0x8c,
	0xe4, 0x23, 0x2d, 0xf9, 0x10, 0x2f, 0xbf, 0x76, 0xd6, 0x6f, 0xe8, 0x79, 0x67, 0xb7, 0xcc, 0x5c,
	0x26, 0x2e, 0x68, 0x11, 0x1e, 0x4a, 0xc7, 0x67, 0xf1, 0x1f, 0x9d, 0x1f, 0xd1, 0x58, 0x29, 0x2e,
	0x8f, 0x5d, 0x37, 0xcb, 0xb2, 0x19, 0xab, 0x4b, 0x52, 0x35, 0x9e, 0x7c, 0x37, 0xbd, 0x59, 0xd7,
	0xd1, 0x09, 0x13, 0x49, 0x85, 0xca, 0x39, 0x5d, 0x88, 0xd1, 0xe5, 0x79, 0x8b, 0x47, 0x44, 0x8d,
	0x98, 0x88, 0xbb, 0xcf, 0xa0, 0xed, 0xb3, 0xb8, 0x66, 0xed, 0xbe, 0xfc, 0xc8, 0xf8, 0x05, 0x2c,
	0x22, 0x49, 0xe0, 0x30, 0x11, 0xb8, 0x01, 0x4d, 0xf4, 0x65, 0x77, 0xa7, 0xc9, 0x2e, 0xfa, 0xfc,
	0xbb, 0x6f, 0x1e, 0x06, 0x9b, 0x1a, 0x79, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x76, 0xc7,
	0x14, 0x8b, 0x24, 0x0b, 0x00, 0x00,
}
| |
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
| |
// BigQueryWriteClient is the client API for BigQueryWrite service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BigQueryWriteClient interface {
	// Creates a write stream to the given table.
	CreateWriteStream(ctx context.Context, in *CreateWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
	// Appends data to the given stream.
	//
	// If `offset` is specified, the `offset` is checked against the end of
	// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
	// attempt is made to append to an offset beyond the current end of the stream
	// or `ALREADY_EXISTS` if user provides an `offset` that has already been
	// written to. User can retry with adjusted offset within the same RPC
	// stream. If `offset` is not specified, append happens at the end of the
	// stream.
	//
	// The response contains the offset at which the append happened. Responses
	// are received in the same order in which requests are sent. There will be
	// one response for each successful request. If the `offset` is not set in
	// response, it means append didn't happen due to some errors. If one request
	// fails, all the subsequent requests will also fail until a success request
	// is made again.
	//
	// If the stream is of `PENDING` type, data will only be available for read
	// operations after the stream is committed.
	AppendRows(ctx context.Context, opts ...grpc.CallOption) (BigQueryWrite_AppendRowsClient, error)
	// Gets a write stream.
	GetWriteStream(ctx context.Context, in *GetWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error)
	// Finalize a write stream so that no new data can be appended to the
	// stream.
	FinalizeWriteStream(ctx context.Context, in *FinalizeWriteStreamRequest, opts ...grpc.CallOption) (*FinalizeWriteStreamResponse, error)
	// Atomically commits a group of `PENDING` streams that belong to the same
	// `parent` table.
	// Streams must be finalized before commit and cannot be committed multiple
	// times. Once a stream is committed, data in the stream becomes available
	// for read operations.
	BatchCommitWriteStreams(ctx context.Context, in *BatchCommitWriteStreamsRequest, opts ...grpc.CallOption) (*BatchCommitWriteStreamsResponse, error)
}
| |
// bigQueryWriteClient implements BigQueryWriteClient over a gRPC connection.
type bigQueryWriteClient struct {
	cc grpc.ClientConnInterface
}

// NewBigQueryWriteClient returns a BigQueryWriteClient that issues RPCs on cc.
func NewBigQueryWriteClient(cc grpc.ClientConnInterface) BigQueryWriteClient {
	return &bigQueryWriteClient{cc}
}
| |
// CreateWriteStream invokes the unary CreateWriteStream RPC.
func (c *bigQueryWriteClient) CreateWriteStream(ctx context.Context, in *CreateWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error) {
	out := new(WriteStream)
	err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/CreateWriteStream", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
| |
// AppendRows opens the bidirectional AppendRows stream and returns a typed
// wrapper around the raw grpc.ClientStream.
func (c *bigQueryWriteClient) AppendRows(ctx context.Context, opts ...grpc.CallOption) (BigQueryWrite_AppendRowsClient, error) {
	stream, err := c.cc.NewStream(ctx, &_BigQueryWrite_serviceDesc.Streams[0], "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/AppendRows", opts...)
	if err != nil {
		return nil, err
	}
	x := &bigQueryWriteAppendRowsClient{stream}
	return x, nil
}
| |
// BigQueryWrite_AppendRowsClient is the client-side handle for the
// bidirectional AppendRows stream: typed Send/Recv on top of grpc.ClientStream.
type BigQueryWrite_AppendRowsClient interface {
	Send(*AppendRowsRequest) error
	Recv() (*AppendRowsResponse, error)
	grpc.ClientStream
}

// bigQueryWriteAppendRowsClient adapts a raw grpc.ClientStream to the typed
// interface above.
type bigQueryWriteAppendRowsClient struct {
	grpc.ClientStream
}

// Send writes one AppendRowsRequest onto the stream.
func (x *bigQueryWriteAppendRowsClient) Send(m *AppendRowsRequest) error {
	return x.ClientStream.SendMsg(m)
}

// Recv blocks for and returns the next AppendRowsResponse from the stream.
func (x *bigQueryWriteAppendRowsClient) Recv() (*AppendRowsResponse, error) {
	m := new(AppendRowsResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
| |
// GetWriteStream invokes the unary GetWriteStream RPC and returns the
// requested WriteStream on success.
func (c *bigQueryWriteClient) GetWriteStream(ctx context.Context, in *GetWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error) {
	out := new(WriteStream)
	err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/GetWriteStream", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
| |
// FinalizeWriteStream invokes the unary FinalizeWriteStream RPC.
func (c *bigQueryWriteClient) FinalizeWriteStream(ctx context.Context, in *FinalizeWriteStreamRequest, opts ...grpc.CallOption) (*FinalizeWriteStreamResponse, error) {
	out := new(FinalizeWriteStreamResponse)
	err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/FinalizeWriteStream", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
| |
// BatchCommitWriteStreams invokes the unary BatchCommitWriteStreams RPC.
func (c *bigQueryWriteClient) BatchCommitWriteStreams(ctx context.Context, in *BatchCommitWriteStreamsRequest, opts ...grpc.CallOption) (*BatchCommitWriteStreamsResponse, error) {
	out := new(BatchCommitWriteStreamsResponse)
	err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/BatchCommitWriteStreams", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
| |
// BigQueryWriteServer is the server API for BigQueryWrite service.
type BigQueryWriteServer interface {
	// Creates a write stream to the given table.
	CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
	// Appends data to the given stream.
	//
	// If `offset` is specified, the `offset` is checked against the end of
	// stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
	// attempt is made to append to an offset beyond the current end of the stream
	// or `ALREADY_EXISTS` if user provides an `offset` that has already been
	// written to. User can retry with adjusted offset within the same RPC
	// stream. If `offset` is not specified, append happens at the end of the
	// stream.
	//
	// The response contains the offset at which the append happened. Responses
	// are received in the same order in which requests are sent. There will be
	// one response for each successful request. If the `offset` is not set in
	// response, it means append didn't happen due to some errors. If one request
	// fails, all the subsequent requests will also fail until a successful request
	// is made again.
	//
	// If the stream is of `PENDING` type, data will only be available for read
	// operations after the stream is committed.
	AppendRows(BigQueryWrite_AppendRowsServer) error
	// Gets a write stream.
	GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
	// Finalize a write stream so that no new data can be appended to the
	// stream.
	FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
	// Atomically commits a group of `PENDING` streams that belong to the same
	// `parent` table.
	// Streams must be finalized before commit and cannot be committed multiple
	// times. Once a stream is committed, data in the stream becomes available
	// for read operations.
	BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
}
| |
// UnimplementedBigQueryWriteServer can be embedded to have forward compatible implementations.
// Every method on it returns a codes.Unimplemented gRPC error.
type UnimplementedBigQueryWriteServer struct {
}
| |
// CreateWriteStream reports the method as unimplemented.
func (*UnimplementedBigQueryWriteServer) CreateWriteStream(ctx context.Context, req *CreateWriteStreamRequest) (*WriteStream, error) {
	return nil, status1.Errorf(codes.Unimplemented, "method CreateWriteStream not implemented")
}
// AppendRows reports the method as unimplemented.
func (*UnimplementedBigQueryWriteServer) AppendRows(srv BigQueryWrite_AppendRowsServer) error {
	return status1.Errorf(codes.Unimplemented, "method AppendRows not implemented")
}
// GetWriteStream reports the method as unimplemented.
func (*UnimplementedBigQueryWriteServer) GetWriteStream(ctx context.Context, req *GetWriteStreamRequest) (*WriteStream, error) {
	return nil, status1.Errorf(codes.Unimplemented, "method GetWriteStream not implemented")
}
// FinalizeWriteStream reports the method as unimplemented.
func (*UnimplementedBigQueryWriteServer) FinalizeWriteStream(ctx context.Context, req *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error) {
	return nil, status1.Errorf(codes.Unimplemented, "method FinalizeWriteStream not implemented")
}
// BatchCommitWriteStreams reports the method as unimplemented.
func (*UnimplementedBigQueryWriteServer) BatchCommitWriteStreams(ctx context.Context, req *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error) {
	return nil, status1.Errorf(codes.Unimplemented, "method BatchCommitWriteStreams not implemented")
}
| |
// RegisterBigQueryWriteServer registers srv's handlers with the gRPC server s
// using the BigQueryWrite service descriptor.
func RegisterBigQueryWriteServer(s *grpc.Server, srv BigQueryWriteServer) {
	s.RegisterService(&_BigQueryWrite_serviceDesc, srv)
}
| |
// _BigQueryWrite_CreateWriteStream_Handler decodes the request, then either
// calls the server method directly or routes the call through the configured
// unary interceptor.
func _BigQueryWrite_CreateWriteStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CreateWriteStreamRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(BigQueryWriteServer).CreateWriteStream(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/CreateWriteStream",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(BigQueryWriteServer).CreateWriteStream(ctx, req.(*CreateWriteStreamRequest))
	}
	return interceptor(ctx, in, info, handler)
}
| |
// _BigQueryWrite_AppendRows_Handler wraps the raw server stream in the typed
// AppendRows server API and dispatches to the service implementation.
func _BigQueryWrite_AppendRows_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(BigQueryWriteServer).AppendRows(&bigQueryWriteAppendRowsServer{stream})
}
| |
// BigQueryWrite_AppendRowsServer is the server-side view of the AppendRows
// bidirectional stream: typed Send/Recv plus the embedded grpc.ServerStream.
type BigQueryWrite_AppendRowsServer interface {
	Send(*AppendRowsResponse) error
	Recv() (*AppendRowsRequest, error)
	grpc.ServerStream
}
| |
// bigQueryWriteAppendRowsServer adapts a raw grpc.ServerStream to the typed
// BigQueryWrite_AppendRowsServer interface.
type bigQueryWriteAppendRowsServer struct {
	grpc.ServerStream
}
| |
// Send marshals m and writes it to the underlying server stream.
func (x *bigQueryWriteAppendRowsServer) Send(m *AppendRowsResponse) error {
	return x.ServerStream.SendMsg(m)
}
| |
// Recv reads and unmarshals the next AppendRowsRequest from the stream.
func (x *bigQueryWriteAppendRowsServer) Recv() (*AppendRowsRequest, error) {
	m := new(AppendRowsRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
| |
// _BigQueryWrite_GetWriteStream_Handler decodes the request, then either calls
// the server method directly or routes the call through the configured unary
// interceptor.
func _BigQueryWrite_GetWriteStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetWriteStreamRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(BigQueryWriteServer).GetWriteStream(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/GetWriteStream",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(BigQueryWriteServer).GetWriteStream(ctx, req.(*GetWriteStreamRequest))
	}
	return interceptor(ctx, in, info, handler)
}
| |
// _BigQueryWrite_FinalizeWriteStream_Handler decodes the request, then either
// calls the server method directly or routes the call through the configured
// unary interceptor.
func _BigQueryWrite_FinalizeWriteStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(FinalizeWriteStreamRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(BigQueryWriteServer).FinalizeWriteStream(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/FinalizeWriteStream",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(BigQueryWriteServer).FinalizeWriteStream(ctx, req.(*FinalizeWriteStreamRequest))
	}
	return interceptor(ctx, in, info, handler)
}
| |
// _BigQueryWrite_BatchCommitWriteStreams_Handler decodes the request, then
// either calls the server method directly or routes the call through the
// configured unary interceptor.
func _BigQueryWrite_BatchCommitWriteStreams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(BatchCommitWriteStreamsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(BigQueryWriteServer).BatchCommitWriteStreams(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/BatchCommitWriteStreams",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(BigQueryWriteServer).BatchCommitWriteStreams(ctx, req.(*BatchCommitWriteStreamsRequest))
	}
	return interceptor(ctx, in, info, handler)
}
| |
// _BigQueryWrite_serviceDesc maps the service's wire-level method names to
// their handlers. AppendRows is the single stream entry (index 0, referenced
// by the client's AppendRows method) and is bidirectional: both ServerStreams
// and ClientStreams are set.
var _BigQueryWrite_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite",
	HandlerType: (*BigQueryWriteServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "CreateWriteStream",
			Handler:    _BigQueryWrite_CreateWriteStream_Handler,
		},
		{
			MethodName: "GetWriteStream",
			Handler:    _BigQueryWrite_GetWriteStream_Handler,
		},
		{
			MethodName: "FinalizeWriteStream",
			Handler:    _BigQueryWrite_FinalizeWriteStream_Handler,
		},
		{
			MethodName: "BatchCommitWriteStreams",
			Handler:    _BigQueryWrite_BatchCommitWriteStreams_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "AppendRows",
			Handler:       _BigQueryWrite_AppendRows_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "google/cloud/bigquery/storage/v1alpha2/storage.proto",
}