| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/cloud/bigquery/storage/v1beta1/storage.proto |
| |
| package storage |
| |
import (
	context "context"
	fmt "fmt"
	math "math"

	proto "github.com/golang/protobuf/proto"
	empty "github.com/golang/protobuf/ptypes/empty"
	timestamp "github.com/golang/protobuf/ptypes/timestamp"
	_ "google.golang.org/genproto/googleapis/api/annotations"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)
| |
| // Reference imports to suppress errors if they are not otherwise used. |
| var _ = proto.Marshal |
| var _ = fmt.Errorf |
| var _ = math.Inf |
| |
| // This is a compile-time assertion to ensure that this generated file |
| // is compatible with the proto package it is being compiled against. |
| // A compilation error at this line likely means your copy of the |
| // proto package needs to be updated. |
| const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package |
| |
| // Data format for input or output data. |
| type DataFormat int32 |
| |
const (
	// Data format is unspecified.
	DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
	// Avro is a standard open-source, row-based file format.
	// See https://avro.apache.org/ for more details.
	DataFormat_AVRO DataFormat = 1
	// Arrow is a standard open-source, column-based message format.
	// See https://arrow.apache.org/ for more details.
	DataFormat_ARROW DataFormat = 3
)
| |
| var DataFormat_name = map[int32]string{ |
| 0: "DATA_FORMAT_UNSPECIFIED", |
| 1: "AVRO", |
| 3: "ARROW", |
| } |
| |
| var DataFormat_value = map[string]int32{ |
| "DATA_FORMAT_UNSPECIFIED": 0, |
| "AVRO": 1, |
| "ARROW": 3, |
| } |
| |
| func (x DataFormat) String() string { |
| return proto.EnumName(DataFormat_name, int32(x)) |
| } |
| |
| func (DataFormat) EnumDescriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{0} |
| } |
| |
| // Strategy for distributing data among multiple streams in a read session. |
| type ShardingStrategy int32 |
| |
const (
	// Same as LIQUID.
	ShardingStrategy_SHARDING_STRATEGY_UNSPECIFIED ShardingStrategy = 0
	// Assigns data to each stream based on the client's read rate. The faster the
	// client reads from a stream, the more data is assigned to the stream. In
	// this strategy, it's possible to read all data from a single stream even if
	// there are other streams present.
	ShardingStrategy_LIQUID ShardingStrategy = 1
	// Assigns data to each stream such that roughly the same number of rows can
	// be read from each stream. Because the server-side unit for assigning data
	// is collections of rows, the API does not guarantee that each stream will
	// return the same number of rows. Additionally, the limits are enforced based
	// on the number of pre-filtering rows, so some filters can lead to lopsided
	// assignments.
	ShardingStrategy_BALANCED ShardingStrategy = 2
)
| |
| var ShardingStrategy_name = map[int32]string{ |
| 0: "SHARDING_STRATEGY_UNSPECIFIED", |
| 1: "LIQUID", |
| 2: "BALANCED", |
| } |
| |
| var ShardingStrategy_value = map[string]int32{ |
| "SHARDING_STRATEGY_UNSPECIFIED": 0, |
| "LIQUID": 1, |
| "BALANCED": 2, |
| } |
| |
| func (x ShardingStrategy) String() string { |
| return proto.EnumName(ShardingStrategy_name, int32(x)) |
| } |
| |
| func (ShardingStrategy) EnumDescriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{1} |
| } |
| |
| // Information about a single data stream within a read session. |
| type Stream struct { |
| // Name of the stream, in the form |
| // `projects/{project_id}/locations/{location}/streams/{stream_id}`. |
| Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *Stream) Reset() { *m = Stream{} } |
| func (m *Stream) String() string { return proto.CompactTextString(m) } |
| func (*Stream) ProtoMessage() {} |
| func (*Stream) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{0} |
| } |
| |
| func (m *Stream) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_Stream.Unmarshal(m, b) |
| } |
| func (m *Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_Stream.Marshal(b, m, deterministic) |
| } |
| func (m *Stream) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_Stream.Merge(m, src) |
| } |
| func (m *Stream) XXX_Size() int { |
| return xxx_messageInfo_Stream.Size(m) |
| } |
| func (m *Stream) XXX_DiscardUnknown() { |
| xxx_messageInfo_Stream.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_Stream proto.InternalMessageInfo |
| |
| func (m *Stream) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
| // Expresses a point within a given stream using an offset position. |
| type StreamPosition struct { |
| // Identifier for a given Stream. |
| Stream *Stream `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"` |
| // Position in the stream. |
| Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *StreamPosition) Reset() { *m = StreamPosition{} } |
| func (m *StreamPosition) String() string { return proto.CompactTextString(m) } |
| func (*StreamPosition) ProtoMessage() {} |
| func (*StreamPosition) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{1} |
| } |
| |
| func (m *StreamPosition) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_StreamPosition.Unmarshal(m, b) |
| } |
| func (m *StreamPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_StreamPosition.Marshal(b, m, deterministic) |
| } |
| func (m *StreamPosition) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_StreamPosition.Merge(m, src) |
| } |
| func (m *StreamPosition) XXX_Size() int { |
| return xxx_messageInfo_StreamPosition.Size(m) |
| } |
| func (m *StreamPosition) XXX_DiscardUnknown() { |
| xxx_messageInfo_StreamPosition.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_StreamPosition proto.InternalMessageInfo |
| |
| func (m *StreamPosition) GetStream() *Stream { |
| if m != nil { |
| return m.Stream |
| } |
| return nil |
| } |
| |
| func (m *StreamPosition) GetOffset() int64 { |
| if m != nil { |
| return m.Offset |
| } |
| return 0 |
| } |
| |
| // Information returned from a `CreateReadSession` request. |
| type ReadSession struct { |
| // Unique identifier for the session, in the form |
| // `projects/{project_id}/locations/{location}/sessions/{session_id}`. |
| Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` |
| // Time at which the session becomes invalid. After this time, subsequent |
| // requests to read this Session will return errors. |
| ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` |
| // The schema for the read. If read_options.selected_fields is set, the |
| // schema may be different from the table schema as it will only contain |
| // the selected fields. |
| // |
| // Types that are valid to be assigned to Schema: |
| // *ReadSession_AvroSchema |
| // *ReadSession_ArrowSchema |
| Schema isReadSession_Schema `protobuf_oneof:"schema"` |
| // Streams associated with this session. |
| Streams []*Stream `protobuf:"bytes,4,rep,name=streams,proto3" json:"streams,omitempty"` |
| // Table that this ReadSession is reading from. |
| TableReference *TableReference `protobuf:"bytes,7,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"` |
| // Any modifiers which are applied when reading from the specified table. |
| TableModifiers *TableModifiers `protobuf:"bytes,8,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"` |
| // The strategy to use for distributing data among the streams. |
| ShardingStrategy ShardingStrategy `protobuf:"varint,9,opt,name=sharding_strategy,json=shardingStrategy,proto3,enum=google.cloud.bigquery.storage.v1beta1.ShardingStrategy" json:"sharding_strategy,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *ReadSession) Reset() { *m = ReadSession{} } |
| func (m *ReadSession) String() string { return proto.CompactTextString(m) } |
| func (*ReadSession) ProtoMessage() {} |
| func (*ReadSession) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{2} |
| } |
| |
| func (m *ReadSession) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_ReadSession.Unmarshal(m, b) |
| } |
| func (m *ReadSession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_ReadSession.Marshal(b, m, deterministic) |
| } |
| func (m *ReadSession) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_ReadSession.Merge(m, src) |
| } |
| func (m *ReadSession) XXX_Size() int { |
| return xxx_messageInfo_ReadSession.Size(m) |
| } |
| func (m *ReadSession) XXX_DiscardUnknown() { |
| xxx_messageInfo_ReadSession.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_ReadSession proto.InternalMessageInfo |
| |
| func (m *ReadSession) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
| func (m *ReadSession) GetExpireTime() *timestamp.Timestamp { |
| if m != nil { |
| return m.ExpireTime |
| } |
| return nil |
| } |
| |
| type isReadSession_Schema interface { |
| isReadSession_Schema() |
| } |
| |
| type ReadSession_AvroSchema struct { |
| AvroSchema *AvroSchema `protobuf:"bytes,5,opt,name=avro_schema,json=avroSchema,proto3,oneof"` |
| } |
| |
| type ReadSession_ArrowSchema struct { |
| ArrowSchema *ArrowSchema `protobuf:"bytes,6,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"` |
| } |
| |
| func (*ReadSession_AvroSchema) isReadSession_Schema() {} |
| |
| func (*ReadSession_ArrowSchema) isReadSession_Schema() {} |
| |
| func (m *ReadSession) GetSchema() isReadSession_Schema { |
| if m != nil { |
| return m.Schema |
| } |
| return nil |
| } |
| |
| func (m *ReadSession) GetAvroSchema() *AvroSchema { |
| if x, ok := m.GetSchema().(*ReadSession_AvroSchema); ok { |
| return x.AvroSchema |
| } |
| return nil |
| } |
| |
| func (m *ReadSession) GetArrowSchema() *ArrowSchema { |
| if x, ok := m.GetSchema().(*ReadSession_ArrowSchema); ok { |
| return x.ArrowSchema |
| } |
| return nil |
| } |
| |
| func (m *ReadSession) GetStreams() []*Stream { |
| if m != nil { |
| return m.Streams |
| } |
| return nil |
| } |
| |
| func (m *ReadSession) GetTableReference() *TableReference { |
| if m != nil { |
| return m.TableReference |
| } |
| return nil |
| } |
| |
| func (m *ReadSession) GetTableModifiers() *TableModifiers { |
| if m != nil { |
| return m.TableModifiers |
| } |
| return nil |
| } |
| |
| func (m *ReadSession) GetShardingStrategy() ShardingStrategy { |
| if m != nil { |
| return m.ShardingStrategy |
| } |
| return ShardingStrategy_SHARDING_STRATEGY_UNSPECIFIED |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*ReadSession) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*ReadSession_AvroSchema)(nil), |
| (*ReadSession_ArrowSchema)(nil), |
| } |
| } |
| |
| // Creates a new read session, which may include additional options such as |
| // requested parallelism, projection filters and constraints. |
| type CreateReadSessionRequest struct { |
| // Required. Reference to the table to read. |
| TableReference *TableReference `protobuf:"bytes,1,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"` |
| // Required. String of the form `projects/{project_id}` indicating the |
| // project this ReadSession is associated with. This is the project that will |
| // be billed for usage. |
| Parent string `protobuf:"bytes,6,opt,name=parent,proto3" json:"parent,omitempty"` |
| // Any modifiers to the Table (e.g. snapshot timestamp). |
| TableModifiers *TableModifiers `protobuf:"bytes,2,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"` |
// Initial number of streams. If unset or 0, the server will
// provide a number of streams chosen to produce reasonable throughput. Must be
// non-negative. The number of streams may be lower than the requested number,
// depending on the amount of parallelism that is reasonable for the table and
// the maximum amount of parallelism allowed by the system.
| // |
| // Streams must be read starting from offset 0. |
| RequestedStreams int32 `protobuf:"varint,3,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"` |
| // Read options for this session (e.g. column selection, filters). |
| ReadOptions *TableReadOptions `protobuf:"bytes,4,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"` |
// Data output format. Currently defaults to Avro.
| Format DataFormat `protobuf:"varint,5,opt,name=format,proto3,enum=google.cloud.bigquery.storage.v1beta1.DataFormat" json:"format,omitempty"` |
| // The strategy to use for distributing data among multiple streams. Currently |
| // defaults to liquid sharding. |
| ShardingStrategy ShardingStrategy `protobuf:"varint,7,opt,name=sharding_strategy,json=shardingStrategy,proto3,enum=google.cloud.bigquery.storage.v1beta1.ShardingStrategy" json:"sharding_strategy,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *CreateReadSessionRequest) Reset() { *m = CreateReadSessionRequest{} } |
| func (m *CreateReadSessionRequest) String() string { return proto.CompactTextString(m) } |
| func (*CreateReadSessionRequest) ProtoMessage() {} |
| func (*CreateReadSessionRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{3} |
| } |
| |
| func (m *CreateReadSessionRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_CreateReadSessionRequest.Unmarshal(m, b) |
| } |
| func (m *CreateReadSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_CreateReadSessionRequest.Marshal(b, m, deterministic) |
| } |
| func (m *CreateReadSessionRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_CreateReadSessionRequest.Merge(m, src) |
| } |
| func (m *CreateReadSessionRequest) XXX_Size() int { |
| return xxx_messageInfo_CreateReadSessionRequest.Size(m) |
| } |
| func (m *CreateReadSessionRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_CreateReadSessionRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_CreateReadSessionRequest proto.InternalMessageInfo |
| |
| func (m *CreateReadSessionRequest) GetTableReference() *TableReference { |
| if m != nil { |
| return m.TableReference |
| } |
| return nil |
| } |
| |
| func (m *CreateReadSessionRequest) GetParent() string { |
| if m != nil { |
| return m.Parent |
| } |
| return "" |
| } |
| |
| func (m *CreateReadSessionRequest) GetTableModifiers() *TableModifiers { |
| if m != nil { |
| return m.TableModifiers |
| } |
| return nil |
| } |
| |
| func (m *CreateReadSessionRequest) GetRequestedStreams() int32 { |
| if m != nil { |
| return m.RequestedStreams |
| } |
| return 0 |
| } |
| |
| func (m *CreateReadSessionRequest) GetReadOptions() *TableReadOptions { |
| if m != nil { |
| return m.ReadOptions |
| } |
| return nil |
| } |
| |
| func (m *CreateReadSessionRequest) GetFormat() DataFormat { |
| if m != nil { |
| return m.Format |
| } |
| return DataFormat_DATA_FORMAT_UNSPECIFIED |
| } |
| |
| func (m *CreateReadSessionRequest) GetShardingStrategy() ShardingStrategy { |
| if m != nil { |
| return m.ShardingStrategy |
| } |
| return ShardingStrategy_SHARDING_STRATEGY_UNSPECIFIED |
| } |
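
// newBalancedSessionRequest is an illustrative sketch, not generated code:
// it shows one plausible way to populate a CreateReadSessionRequest. All
// field values here are placeholder assumptions; TableReference is defined
// elsewhere in this package.
func newBalancedSessionRequest(parent string, table *TableReference) *CreateReadSessionRequest {
	return &CreateReadSessionRequest{
		Parent:           parent, // e.g. "projects/{project_id}"
		TableReference:   table,
		RequestedStreams: 4, // hypothetical; the server may assign fewer
		Format:           DataFormat_AVRO,
		ShardingStrategy: ShardingStrategy_BALANCED,
	}
}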
| |
// Requests for row data via `ReadRows` must provide Stream position information.
| type ReadRowsRequest struct { |
| // Required. Identifier of the position in the stream to start reading from. |
| // The offset requested must be less than the last row read from ReadRows. |
| // Requesting a larger offset is undefined. |
| ReadPosition *StreamPosition `protobuf:"bytes,1,opt,name=read_position,json=readPosition,proto3" json:"read_position,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} } |
| func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) } |
| func (*ReadRowsRequest) ProtoMessage() {} |
| func (*ReadRowsRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{4} |
| } |
| |
| func (m *ReadRowsRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_ReadRowsRequest.Unmarshal(m, b) |
| } |
| func (m *ReadRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_ReadRowsRequest.Marshal(b, m, deterministic) |
| } |
| func (m *ReadRowsRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_ReadRowsRequest.Merge(m, src) |
| } |
| func (m *ReadRowsRequest) XXX_Size() int { |
| return xxx_messageInfo_ReadRowsRequest.Size(m) |
| } |
| func (m *ReadRowsRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_ReadRowsRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_ReadRowsRequest proto.InternalMessageInfo |
| |
| func (m *ReadRowsRequest) GetReadPosition() *StreamPosition { |
| if m != nil { |
| return m.ReadPosition |
| } |
| return nil |
| } |
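
// resumeAt is an illustrative sketch, not generated code: it builds a
// ReadRowsRequest that resumes a stream at a caller-tracked offset, per the
// StreamPosition semantics documented above.
func resumeAt(s *Stream, offset int64) *ReadRowsRequest {
	return &ReadRowsRequest{
		ReadPosition: &StreamPosition{Stream: s, Offset: offset},
	}
}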
| |
| // Progress information for a given Stream. |
| type StreamStatus struct { |
// Number of estimated rows in the current stream. May change over time as
// different readers in the stream make progress at different rates.
| EstimatedRowCount int64 `protobuf:"varint,1,opt,name=estimated_row_count,json=estimatedRowCount,proto3" json:"estimated_row_count,omitempty"` |
| // A value in the range [0.0, 1.0] that represents the fraction of rows |
| // assigned to this stream that have been processed by the server. In the |
| // presence of read filters, the server may process more rows than it returns, |
| // so this value reflects progress through the pre-filtering rows. |
| // |
| // This value is only populated for sessions created through the BALANCED |
| // sharding strategy. |
| FractionConsumed float32 `protobuf:"fixed32,2,opt,name=fraction_consumed,json=fractionConsumed,proto3" json:"fraction_consumed,omitempty"` |
| // Represents the progress of the current stream. |
| // |
| // Note: This value is under development and should not be used. Use |
| // `fraction_consumed` instead. |
| Progress *Progress `protobuf:"bytes,4,opt,name=progress,proto3" json:"progress,omitempty"` |
| // Whether this stream can be split. For sessions that use the LIQUID sharding |
| // strategy, this value is always false. For BALANCED sessions, this value is |
| // false when enough data have been read such that no more splits are possible |
| // at that point or beyond. For small tables or streams that are the result of |
| // a chain of splits, this value may never be true. |
| IsSplittable bool `protobuf:"varint,3,opt,name=is_splittable,json=isSplittable,proto3" json:"is_splittable,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *StreamStatus) Reset() { *m = StreamStatus{} } |
| func (m *StreamStatus) String() string { return proto.CompactTextString(m) } |
| func (*StreamStatus) ProtoMessage() {} |
| func (*StreamStatus) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{5} |
| } |
| |
| func (m *StreamStatus) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_StreamStatus.Unmarshal(m, b) |
| } |
| func (m *StreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_StreamStatus.Marshal(b, m, deterministic) |
| } |
| func (m *StreamStatus) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_StreamStatus.Merge(m, src) |
| } |
| func (m *StreamStatus) XXX_Size() int { |
| return xxx_messageInfo_StreamStatus.Size(m) |
| } |
| func (m *StreamStatus) XXX_DiscardUnknown() { |
| xxx_messageInfo_StreamStatus.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_StreamStatus proto.InternalMessageInfo |
| |
| func (m *StreamStatus) GetEstimatedRowCount() int64 { |
| if m != nil { |
| return m.EstimatedRowCount |
| } |
| return 0 |
| } |
| |
| func (m *StreamStatus) GetFractionConsumed() float32 { |
| if m != nil { |
| return m.FractionConsumed |
| } |
| return 0 |
| } |
| |
| func (m *StreamStatus) GetProgress() *Progress { |
| if m != nil { |
| return m.Progress |
| } |
| return nil |
| } |
| |
| func (m *StreamStatus) GetIsSplittable() bool { |
| if m != nil { |
| return m.IsSplittable |
| } |
| return false |
| } |
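
// logStreamProgress is an illustrative sketch, not generated code: it shows
// how the StreamStatus fields documented above might be consumed for
// monitoring. The output format is arbitrary.
func logStreamProgress(st *StreamStatus) {
	if st == nil {
		return
	}
	fmt.Printf("estimated_rows=%d consumed=%.1f%% splittable=%v\n",
		st.GetEstimatedRowCount(),
		st.GetFractionConsumed()*100,
		st.GetIsSplittable())
}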
| |
| type Progress struct { |
| // The fraction of rows assigned to the stream that have been processed by the |
| // server so far, not including the rows in the current response message. |
| // |
| // This value, along with `at_response_end`, can be used to interpolate the |
| // progress made as the rows in the message are being processed using the |
| // following formula: `at_response_start + (at_response_end - |
| // at_response_start) * rows_processed_from_response / rows_in_response`. |
| // |
| // Note that if a filter is provided, the `at_response_end` value of the |
| // previous response may not necessarily be equal to the `at_response_start` |
| // value of the current response. |
| AtResponseStart float32 `protobuf:"fixed32,1,opt,name=at_response_start,json=atResponseStart,proto3" json:"at_response_start,omitempty"` |
| // Similar to `at_response_start`, except that this value includes the rows in |
| // the current response. |
| AtResponseEnd float32 `protobuf:"fixed32,2,opt,name=at_response_end,json=atResponseEnd,proto3" json:"at_response_end,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *Progress) Reset() { *m = Progress{} } |
| func (m *Progress) String() string { return proto.CompactTextString(m) } |
| func (*Progress) ProtoMessage() {} |
| func (*Progress) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{6} |
| } |
| |
| func (m *Progress) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_Progress.Unmarshal(m, b) |
| } |
| func (m *Progress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_Progress.Marshal(b, m, deterministic) |
| } |
| func (m *Progress) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_Progress.Merge(m, src) |
| } |
| func (m *Progress) XXX_Size() int { |
| return xxx_messageInfo_Progress.Size(m) |
| } |
| func (m *Progress) XXX_DiscardUnknown() { |
| xxx_messageInfo_Progress.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_Progress proto.InternalMessageInfo |
| |
| func (m *Progress) GetAtResponseStart() float32 { |
| if m != nil { |
| return m.AtResponseStart |
| } |
| return 0 |
| } |
| |
| func (m *Progress) GetAtResponseEnd() float32 { |
| if m != nil { |
| return m.AtResponseEnd |
| } |
| return 0 |
| } |
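
// interpolateProgress is an illustrative sketch, not generated code: it
// applies the interpolation formula documented on Progress. rowsProcessed
// and rowsInResponse are hypothetical counters kept by the caller while
// iterating over the rows of a single response.
func interpolateProgress(p *Progress, rowsProcessed, rowsInResponse int64) float32 {
	if p == nil || rowsInResponse == 0 {
		return 0
	}
	frac := float32(rowsProcessed) / float32(rowsInResponse)
	return p.GetAtResponseStart() + (p.GetAtResponseEnd()-p.GetAtResponseStart())*frac
}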
| |
// Information on whether the current connection is being throttled.
| type ThrottleStatus struct { |
| // How much this connection is being throttled. |
| // 0 is no throttling, 100 is completely throttled. |
| ThrottlePercent int32 `protobuf:"varint,1,opt,name=throttle_percent,json=throttlePercent,proto3" json:"throttle_percent,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *ThrottleStatus) Reset() { *m = ThrottleStatus{} } |
| func (m *ThrottleStatus) String() string { return proto.CompactTextString(m) } |
| func (*ThrottleStatus) ProtoMessage() {} |
| func (*ThrottleStatus) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{7} |
| } |
| |
| func (m *ThrottleStatus) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_ThrottleStatus.Unmarshal(m, b) |
| } |
| func (m *ThrottleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_ThrottleStatus.Marshal(b, m, deterministic) |
| } |
| func (m *ThrottleStatus) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_ThrottleStatus.Merge(m, src) |
| } |
| func (m *ThrottleStatus) XXX_Size() int { |
| return xxx_messageInfo_ThrottleStatus.Size(m) |
| } |
| func (m *ThrottleStatus) XXX_DiscardUnknown() { |
| xxx_messageInfo_ThrottleStatus.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_ThrottleStatus proto.InternalMessageInfo |
| |
| func (m *ThrottleStatus) GetThrottlePercent() int32 { |
| if m != nil { |
| return m.ThrottlePercent |
| } |
| return 0 |
| } |
| |
| // Response from calling `ReadRows` may include row data, progress and |
| // throttling information. |
| type ReadRowsResponse struct { |
| // Row data is returned in format specified during session creation. |
| // |
| // Types that are valid to be assigned to Rows: |
| // *ReadRowsResponse_AvroRows |
| // *ReadRowsResponse_ArrowRecordBatch |
| Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"` |
| // Number of serialized rows in the rows block. This value is recorded here, |
| // in addition to the row_count values in the output-specific messages in |
| // `rows`, so that code which needs to record progress through the stream can |
| // do so in an output format-independent way. |
| RowCount int64 `protobuf:"varint,6,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` |
| // Estimated stream statistics. |
| Status *StreamStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` |
| // Throttling status. If unset, the latest response still describes |
| // the current throttling status. |
| ThrottleStatus *ThrottleStatus `protobuf:"bytes,5,opt,name=throttle_status,json=throttleStatus,proto3" json:"throttle_status,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} } |
| func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) } |
| func (*ReadRowsResponse) ProtoMessage() {} |
| func (*ReadRowsResponse) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{8} |
| } |
| |
| func (m *ReadRowsResponse) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_ReadRowsResponse.Unmarshal(m, b) |
| } |
| func (m *ReadRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_ReadRowsResponse.Marshal(b, m, deterministic) |
| } |
| func (m *ReadRowsResponse) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_ReadRowsResponse.Merge(m, src) |
| } |
| func (m *ReadRowsResponse) XXX_Size() int { |
| return xxx_messageInfo_ReadRowsResponse.Size(m) |
| } |
| func (m *ReadRowsResponse) XXX_DiscardUnknown() { |
| xxx_messageInfo_ReadRowsResponse.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_ReadRowsResponse proto.InternalMessageInfo |
| |
| type isReadRowsResponse_Rows interface { |
| isReadRowsResponse_Rows() |
| } |
| |
| type ReadRowsResponse_AvroRows struct { |
| AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"` |
| } |
| |
| type ReadRowsResponse_ArrowRecordBatch struct { |
| ArrowRecordBatch *ArrowRecordBatch `protobuf:"bytes,4,opt,name=arrow_record_batch,json=arrowRecordBatch,proto3,oneof"` |
| } |
| |
| func (*ReadRowsResponse_AvroRows) isReadRowsResponse_Rows() {} |
| |
| func (*ReadRowsResponse_ArrowRecordBatch) isReadRowsResponse_Rows() {} |
| |
| func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows { |
| if m != nil { |
| return m.Rows |
| } |
| return nil |
| } |
| |
| func (m *ReadRowsResponse) GetAvroRows() *AvroRows { |
| if x, ok := m.GetRows().(*ReadRowsResponse_AvroRows); ok { |
| return x.AvroRows |
| } |
| return nil |
| } |
| |
| func (m *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch { |
| if x, ok := m.GetRows().(*ReadRowsResponse_ArrowRecordBatch); ok { |
| return x.ArrowRecordBatch |
| } |
| return nil |
| } |
| |
| func (m *ReadRowsResponse) GetRowCount() int64 { |
| if m != nil { |
| return m.RowCount |
| } |
| return 0 |
| } |
| |
| func (m *ReadRowsResponse) GetStatus() *StreamStatus { |
| if m != nil { |
| return m.Status |
| } |
| return nil |
| } |
| |
| func (m *ReadRowsResponse) GetThrottleStatus() *ThrottleStatus { |
| if m != nil { |
| return m.ThrottleStatus |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*ReadRowsResponse) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*ReadRowsResponse_AvroRows)(nil), |
| (*ReadRowsResponse_ArrowRecordBatch)(nil), |
| } |
| } |
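
// countRows is an illustrative sketch, not generated code: it shows how the
// format-independent RowCount relates to the output-specific `rows` oneof.
func countRows(resp *ReadRowsResponse) int64 {
	switch resp.GetRows().(type) {
	case *ReadRowsResponse_AvroRows:
		// Avro-encoded block; resp.GetAvroRows() carries the payload.
	case *ReadRowsResponse_ArrowRecordBatch:
		// Arrow record batch; resp.GetArrowRecordBatch() carries it.
	}
	return resp.GetRowCount()
}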
| |
| // Information needed to request additional streams for an established read |
| // session. |
| type BatchCreateReadSessionStreamsRequest struct { |
| // Required. Must be a non-expired session obtained from a call to |
| // CreateReadSession. Only the name field needs to be set. |
| Session *ReadSession `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"` |
| // Required. Number of new streams requested. Must be positive. |
// Number of added streams may be less than this; see CreateReadSessionRequest
| // for more information. |
| RequestedStreams int32 `protobuf:"varint,2,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *BatchCreateReadSessionStreamsRequest) Reset() { *m = BatchCreateReadSessionStreamsRequest{} } |
| func (m *BatchCreateReadSessionStreamsRequest) String() string { return proto.CompactTextString(m) } |
| func (*BatchCreateReadSessionStreamsRequest) ProtoMessage() {} |
| func (*BatchCreateReadSessionStreamsRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{9} |
| } |
| |
| func (m *BatchCreateReadSessionStreamsRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_BatchCreateReadSessionStreamsRequest.Unmarshal(m, b) |
| } |
| func (m *BatchCreateReadSessionStreamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_BatchCreateReadSessionStreamsRequest.Marshal(b, m, deterministic) |
| } |
| func (m *BatchCreateReadSessionStreamsRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_BatchCreateReadSessionStreamsRequest.Merge(m, src) |
| } |
| func (m *BatchCreateReadSessionStreamsRequest) XXX_Size() int { |
| return xxx_messageInfo_BatchCreateReadSessionStreamsRequest.Size(m) |
| } |
| func (m *BatchCreateReadSessionStreamsRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_BatchCreateReadSessionStreamsRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_BatchCreateReadSessionStreamsRequest proto.InternalMessageInfo |
| |
| func (m *BatchCreateReadSessionStreamsRequest) GetSession() *ReadSession { |
| if m != nil { |
| return m.Session |
| } |
| return nil |
| } |
| |
| func (m *BatchCreateReadSessionStreamsRequest) GetRequestedStreams() int32 { |
| if m != nil { |
| return m.RequestedStreams |
| } |
| return 0 |
| } |
| |
// The response from `BatchCreateReadSessionStreams` contains the stream
// identifiers for the newly created streams.
| type BatchCreateReadSessionStreamsResponse struct { |
| // Newly added streams. |
| Streams []*Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *BatchCreateReadSessionStreamsResponse) Reset() { *m = BatchCreateReadSessionStreamsResponse{} } |
| func (m *BatchCreateReadSessionStreamsResponse) String() string { return proto.CompactTextString(m) } |
| func (*BatchCreateReadSessionStreamsResponse) ProtoMessage() {} |
| func (*BatchCreateReadSessionStreamsResponse) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{10} |
| } |
| |
| func (m *BatchCreateReadSessionStreamsResponse) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_BatchCreateReadSessionStreamsResponse.Unmarshal(m, b) |
| } |
| func (m *BatchCreateReadSessionStreamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_BatchCreateReadSessionStreamsResponse.Marshal(b, m, deterministic) |
| } |
| func (m *BatchCreateReadSessionStreamsResponse) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_BatchCreateReadSessionStreamsResponse.Merge(m, src) |
| } |
| func (m *BatchCreateReadSessionStreamsResponse) XXX_Size() int { |
| return xxx_messageInfo_BatchCreateReadSessionStreamsResponse.Size(m) |
| } |
| func (m *BatchCreateReadSessionStreamsResponse) XXX_DiscardUnknown() { |
| xxx_messageInfo_BatchCreateReadSessionStreamsResponse.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_BatchCreateReadSessionStreamsResponse proto.InternalMessageInfo |
| |
| func (m *BatchCreateReadSessionStreamsResponse) GetStreams() []*Stream { |
| if m != nil { |
| return m.Streams |
| } |
| return nil |
| } |
| |
| // Request information for invoking `FinalizeStream`. |
| type FinalizeStreamRequest struct { |
| // Stream to finalize. |
| Stream *Stream `protobuf:"bytes,2,opt,name=stream,proto3" json:"stream,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *FinalizeStreamRequest) Reset() { *m = FinalizeStreamRequest{} } |
| func (m *FinalizeStreamRequest) String() string { return proto.CompactTextString(m) } |
| func (*FinalizeStreamRequest) ProtoMessage() {} |
| func (*FinalizeStreamRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{11} |
| } |
| |
| func (m *FinalizeStreamRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_FinalizeStreamRequest.Unmarshal(m, b) |
| } |
| func (m *FinalizeStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_FinalizeStreamRequest.Marshal(b, m, deterministic) |
| } |
| func (m *FinalizeStreamRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_FinalizeStreamRequest.Merge(m, src) |
| } |
| func (m *FinalizeStreamRequest) XXX_Size() int { |
| return xxx_messageInfo_FinalizeStreamRequest.Size(m) |
| } |
| func (m *FinalizeStreamRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_FinalizeStreamRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_FinalizeStreamRequest proto.InternalMessageInfo |
| |
| func (m *FinalizeStreamRequest) GetStream() *Stream { |
| if m != nil { |
| return m.Stream |
| } |
| return nil |
| } |
| |
| // Request information for `SplitReadStream`. |
| type SplitReadStreamRequest struct { |
| // Stream to split. |
| OriginalStream *Stream `protobuf:"bytes,1,opt,name=original_stream,json=originalStream,proto3" json:"original_stream,omitempty"` |
| // A value in the range (0.0, 1.0) that specifies the fractional point at |
| // which the original stream should be split. The actual split point is |
| // evaluated on pre-filtered rows, so if a filter is provided, then there is |
| // no guarantee that the division of the rows between the new child streams |
| // will be proportional to this fractional value. Additionally, because the |
| // server-side unit for assigning data is collections of rows, this fraction |
// will always map to a data storage boundary on the server side.
| Fraction float32 `protobuf:"fixed32,2,opt,name=fraction,proto3" json:"fraction,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *SplitReadStreamRequest) Reset() { *m = SplitReadStreamRequest{} } |
| func (m *SplitReadStreamRequest) String() string { return proto.CompactTextString(m) } |
| func (*SplitReadStreamRequest) ProtoMessage() {} |
| func (*SplitReadStreamRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{12} |
| } |
| |
| func (m *SplitReadStreamRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_SplitReadStreamRequest.Unmarshal(m, b) |
| } |
| func (m *SplitReadStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_SplitReadStreamRequest.Marshal(b, m, deterministic) |
| } |
| func (m *SplitReadStreamRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_SplitReadStreamRequest.Merge(m, src) |
| } |
| func (m *SplitReadStreamRequest) XXX_Size() int { |
| return xxx_messageInfo_SplitReadStreamRequest.Size(m) |
| } |
| func (m *SplitReadStreamRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_SplitReadStreamRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_SplitReadStreamRequest proto.InternalMessageInfo |
| |
| func (m *SplitReadStreamRequest) GetOriginalStream() *Stream { |
| if m != nil { |
| return m.OriginalStream |
| } |
| return nil |
| } |
| |
| func (m *SplitReadStreamRequest) GetFraction() float32 { |
| if m != nil { |
| return m.Fraction |
| } |
| return 0 |
| } |
| |
| // Response from `SplitReadStream`. |
| type SplitReadStreamResponse struct { |
| // Primary stream, which contains the beginning portion of |
| // |original_stream|. An empty value indicates that the original stream can no |
| // longer be split. |
| PrimaryStream *Stream `protobuf:"bytes,1,opt,name=primary_stream,json=primaryStream,proto3" json:"primary_stream,omitempty"` |
| // Remainder stream, which contains the tail of |original_stream|. An empty |
| // value indicates that the original stream can no longer be split. |
| RemainderStream *Stream `protobuf:"bytes,2,opt,name=remainder_stream,json=remainderStream,proto3" json:"remainder_stream,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *SplitReadStreamResponse) Reset() { *m = SplitReadStreamResponse{} } |
| func (m *SplitReadStreamResponse) String() string { return proto.CompactTextString(m) } |
| func (*SplitReadStreamResponse) ProtoMessage() {} |
| func (*SplitReadStreamResponse) Descriptor() ([]byte, []int) { |
| return fileDescriptor_2a3518a93fa439fd, []int{13} |
| } |
| |
| func (m *SplitReadStreamResponse) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_SplitReadStreamResponse.Unmarshal(m, b) |
| } |
| func (m *SplitReadStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_SplitReadStreamResponse.Marshal(b, m, deterministic) |
| } |
| func (m *SplitReadStreamResponse) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_SplitReadStreamResponse.Merge(m, src) |
| } |
| func (m *SplitReadStreamResponse) XXX_Size() int { |
| return xxx_messageInfo_SplitReadStreamResponse.Size(m) |
| } |
| func (m *SplitReadStreamResponse) XXX_DiscardUnknown() { |
| xxx_messageInfo_SplitReadStreamResponse.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_SplitReadStreamResponse proto.InternalMessageInfo |
| |
| func (m *SplitReadStreamResponse) GetPrimaryStream() *Stream { |
| if m != nil { |
| return m.PrimaryStream |
| } |
| return nil |
| } |
| |
| func (m *SplitReadStreamResponse) GetRemainderStream() *Stream { |
| if m != nil { |
| return m.RemainderStream |
| } |
| return nil |
| } |
| |
| func init() { |
| proto.RegisterEnum("google.cloud.bigquery.storage.v1beta1.DataFormat", DataFormat_name, DataFormat_value) |
| proto.RegisterEnum("google.cloud.bigquery.storage.v1beta1.ShardingStrategy", ShardingStrategy_name, ShardingStrategy_value) |
| proto.RegisterType((*Stream)(nil), "google.cloud.bigquery.storage.v1beta1.Stream") |
| proto.RegisterType((*StreamPosition)(nil), "google.cloud.bigquery.storage.v1beta1.StreamPosition") |
| proto.RegisterType((*ReadSession)(nil), "google.cloud.bigquery.storage.v1beta1.ReadSession") |
| proto.RegisterType((*CreateReadSessionRequest)(nil), "google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest") |
| proto.RegisterType((*ReadRowsRequest)(nil), "google.cloud.bigquery.storage.v1beta1.ReadRowsRequest") |
| proto.RegisterType((*StreamStatus)(nil), "google.cloud.bigquery.storage.v1beta1.StreamStatus") |
| proto.RegisterType((*Progress)(nil), "google.cloud.bigquery.storage.v1beta1.Progress") |
| proto.RegisterType((*ThrottleStatus)(nil), "google.cloud.bigquery.storage.v1beta1.ThrottleStatus") |
| proto.RegisterType((*ReadRowsResponse)(nil), "google.cloud.bigquery.storage.v1beta1.ReadRowsResponse") |
| proto.RegisterType((*BatchCreateReadSessionStreamsRequest)(nil), "google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest") |
| proto.RegisterType((*BatchCreateReadSessionStreamsResponse)(nil), "google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse") |
| proto.RegisterType((*FinalizeStreamRequest)(nil), "google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest") |
| proto.RegisterType((*SplitReadStreamRequest)(nil), "google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest") |
| proto.RegisterType((*SplitReadStreamResponse)(nil), "google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse") |
| } |
| |
| func init() { |
| proto.RegisterFile("google/cloud/bigquery/storage/v1beta1/storage.proto", fileDescriptor_2a3518a93fa439fd) |
| } |
| |
| var fileDescriptor_2a3518a93fa439fd = []byte{ |
| // 1645 bytes of a gzipped FileDescriptorProto |
| 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcb, 0x6f, 0x23, 0x49, |
| 0x19, 0x9f, 0x76, 0x12, 0x8f, 0xf3, 0x25, 0xb1, 0x9d, 0x42, 0xcc, 0x78, 0x1d, 0x16, 0x42, 0xc3, |
| 0xa0, 0x6c, 0x76, 0xc6, 0x9d, 0x64, 0x60, 0x67, 0x49, 0xd8, 0x5d, 0x3a, 0xaf, 0x49, 0x34, 0xd9, |
| 0x3c, 0xca, 0xde, 0x5d, 0x98, 0xc3, 0xb4, 0x2a, 0xee, 0xb2, 0xdd, 0x60, 0x77, 0xf5, 0x56, 0x97, |
| 0x63, 0x42, 0x94, 0xcb, 0xde, 0x90, 0x38, 0x20, 0x71, 0xe6, 0x0c, 0x12, 0x12, 0x17, 0x24, 0xfe, |
| 0x07, 0x38, 0x21, 0xb8, 0x45, 0x42, 0xda, 0x03, 0xa7, 0xe5, 0xc8, 0x3f, 0xb0, 0xea, 0xaa, 0x6a, |
| 0x3f, 0x33, 0x93, 0xf6, 0x64, 0x6e, 0x5d, 0x55, 0xdf, 0xef, 0xe7, 0xef, 0x5d, 0x5f, 0x19, 0x1e, |
| 0xd7, 0x19, 0xab, 0x37, 0xa9, 0x55, 0x6d, 0xb2, 0xb6, 0x6b, 0x9d, 0x7a, 0xf5, 0xcf, 0xdb, 0x94, |
| 0x9f, 0x5b, 0xa1, 0x60, 0x9c, 0xd4, 0xa9, 0x75, 0xb6, 0x7a, 0x4a, 0x05, 0x59, 0x8d, 0xd7, 0xa5, |
| 0x80, 0x33, 0xc1, 0xd0, 0x03, 0x05, 0x2a, 0x49, 0x50, 0x29, 0x06, 0x95, 0x62, 0x21, 0x0d, 0x2a, |
| 0x7e, 0x4b, 0x73, 0x93, 0xc0, 0xb3, 0x88, 0xef, 0x33, 0x41, 0x84, 0xc7, 0xfc, 0x50, 0x91, 0x14, |
| 0xef, 0xf7, 0x9d, 0x56, 0x9b, 0x1e, 0xf5, 0x85, 0x3e, 0xf8, 0x4e, 0xdf, 0x41, 0xcd, 0xa3, 0x4d, |
| 0xd7, 0x39, 0xa5, 0x0d, 0x72, 0xe6, 0x31, 0xae, 0x05, 0xde, 0xea, 0x13, 0xe0, 0x34, 0x64, 0x6d, |
| 0x5e, 0xd5, 0x9a, 0x15, 0x57, 0x93, 0x99, 0x43, 0x38, 0x67, 0x1d, 0x0d, 0x59, 0x49, 0x08, 0x39, |
| 0xe3, 0x4c, 0x23, 0xde, 0x4f, 0x86, 0xe0, 0x94, 0xb8, 0x0e, 0x0b, 0xfa, 0x6d, 0xde, 0x48, 0x86, |
| 0x14, 0xe4, 0xb4, 0x49, 0x1d, 0x4e, 0x6b, 0x94, 0x53, 0xbf, 0x6b, 0xdb, 0x82, 0x06, 0xcb, 0xd5, |
| 0x69, 0xbb, 0x66, 0xd1, 0x56, 0x20, 0xce, 0x87, 0x9c, 0xd6, 0x3d, 0x14, 0x5e, 0x8b, 0x86, 0x82, |
| 0xb4, 0x02, 0x25, 0x60, 0x7e, 0x61, 0x40, 0xba, 0x2c, 0x38, 0x25, 0x2d, 0x84, 0x60, 0xd2, 0x27, |
| 0x2d, 0x5a, 0x30, 0x16, 0x8d, 0xa5, 0x69, 0x2c, 0xbf, 0xd7, 0xdd, 0xaf, 0x6c, 0x02, 0x0f, 0x62, |
| 0x8d, 0xe2, 0x48, 0x2a, 0x52, 0x12, 0x78, 0x61, 0xa9, 0xca, 0x5a, 0x96, 0xc6, 0xbf, 0x1f, 0x70, |
| 0xf6, 0x0b, 0x5a, 0x15, 0xa1, 0x75, 0xa1, 0xbf, 0x2e, 0xad, 0x26, 0xab, 0xaa, 0xe8, 0x5a, 0x17, |
| 0xf1, 0xe7, 0xa5, 0x15, 0x4a, 0xf9, 0xd0, 0xba, 0x50, 0x1f, 0x97, 0x26, 0x83, 0xac, 0xe2, 0x38, |
| 0x66, 0xa1, 0x17, 0xc9, 0xa0, 0x1d, 0x48, 0xab, 0x43, 0xa9, 0xcd, 0xcc, 0xda, 0xa3, 0x52, 0xa2, |
| 0xdc, 0x2a, 0x29, 0x1a, 0xac, 0xc1, 0xe8, 0x1e, 0xa4, 0x59, 0xad, 0x16, 0x52, 0x51, 0x48, 0x2d, |
| 0x1a, 0x4b, 0x13, 0x58, 0xaf, 0xcc, 0xff, 0x4f, 0xc1, 0x0c, 0xa6, 0xc4, 0x2d, 0xd3, 0x30, 0x8c, |
| 0x7e, 0xee, 0x1a, 0xd3, 0xd1, 0x06, 0xcc, 0xd0, 0x5f, 0x05, 0x1e, 0xa7, 0x4e, 0xe4, 0x33, 0x49, |
| 0x30, 0xb3, 0x56, 0x8c, 0xf5, 0x88, 0x1d, 0x5a, 0xaa, 0xc4, 0x0e, 0xc5, 0xa0, 0xc4, 0xa3, 0x0d, |
| 0x54, 0x81, 0x99, 0x28, 0x33, 0x9c, 0xb0, 0xda, 0xa0, 0x2d, 0x52, 0x98, 0x92, 0xe0, 0xd5, 0x84, |
| 0x46, 0xd8, 0x67, 0x9c, 0x95, 0x25, 0x70, 0xef, 0x0e, 0x06, 0xd2, 0x5d, 0xa1, 0xcf, 0x60, 0x56, |
| 0xa6, 0x68, 0x4c, 0x9b, 0x96, 0xb4, 0x6b, 0x49, 0x69, 0x23, 0x68, 0x97, 0x77, 0x86, 0xf4, 0x96, |
| 0xe8, 0x29, 0xdc, 0xd5, 0x41, 0x29, 0x4c, 0x2e, 0x4e, 0x8c, 0xef, 0xef, 0x18, 0x8d, 0x5e, 0x40, |
| 0x6e, 0x28, 0x4b, 0x0b, 0x77, 0xa5, 0x92, 0x3f, 0x4a, 0x48, 0x58, 0x89, 0xd0, 0x38, 0x06, 0xe3, |
| 0xac, 0x18, 0x58, 0xf7, 0xf8, 0x5b, 0xcc, 0xf5, 0x6a, 0x1e, 0xe5, 0x61, 0x21, 0x33, 0x3e, 0xff, |
| 0xc7, 0x31, 0x58, 0xf3, 0x77, 0xd7, 0xc8, 0x85, 0xf9, 0xb0, 0x41, 0xb8, 0xeb, 0xf9, 0x75, 0x27, |
| 0x14, 0x9c, 0x08, 0x5a, 0x3f, 0x2f, 0x4c, 0x2f, 0x1a, 0x4b, 0xd9, 0xb5, 0x27, 0x49, 0x5d, 0xa2, |
| 0xf1, 0x65, 0x0d, 0xc7, 0xf9, 0x70, 0x68, 0x67, 0xfd, 0x97, 0x5f, 0xd9, 0x0d, 0x58, 0xbe, 0xa1, |
| 0xaa, 0xfa, 0xf3, 0x73, 0x3d, 0x71, 0x69, 0x29, 0x40, 0x54, 0x5b, 0xea, 0xeb, 0x72, 0x33, 0x03, |
| 0x69, 0x95, 0x2e, 0xe6, 0xdf, 0x26, 0xa1, 0xb0, 0xc5, 0x29, 0x11, 0xb4, 0x8f, 0x1b, 0xd3, 0xcf, |
| 0xdb, 0x34, 0x14, 0xa8, 0x3a, 0x1a, 0x39, 0xe3, 0x16, 0x91, 0xdb, 0x9c, 0xf8, 0xd2, 0x4e, 0x8d, |
| 0x84, 0x6f, 0x01, 0xd2, 0x01, 0xe1, 0xd4, 0x17, 0x32, 0x75, 0xa7, 0x95, 0x90, 0xde, 0xba, 0x2e, |
| 0xb6, 0xa9, 0x37, 0x19, 0xdb, 0x77, 0x61, 0x9e, 0x2b, 0x63, 0xa9, 0xeb, 0xc4, 0xe9, 0x3e, 0xb1, |
| 0x68, 0x2c, 0x4d, 0xe1, 0x7c, 0xf7, 0xa0, 0xac, 0x13, 0xf9, 0x39, 0xcc, 0xf6, 0x37, 0xea, 0xc2, |
| 0xa4, 0xd4, 0xe4, 0xc9, 0x78, 0xbe, 0x20, 0xee, 0x91, 0x82, 0xe3, 0x19, 0xde, 0x5b, 0xa0, 0x7d, |
| 0x48, 0xd7, 0x18, 0x6f, 0x11, 0x21, 0xfb, 0x42, 0x36, 0x71, 0x5f, 0xd8, 0x26, 0x82, 0xec, 0x4a, |
| 0x20, 0xd6, 0x04, 0xd7, 0xe7, 0xeb, 0xdd, 0x37, 0x9c, 0xaf, 0x26, 0x87, 0x5c, 0x64, 0x0c, 0x66, |
| 0x9d, 0x30, 0x4e, 0x17, 0x07, 0xe6, 0xa4, 0x7f, 0x02, 0xdd, 0xb1, 0xc7, 0x4c, 0x96, 0xc1, 0x76, |
| 0xaf, 0xf2, 0x40, 0x3a, 0x3c, 0xde, 0x32, 0xff, 0x63, 0xc0, 0xac, 0x92, 0x2a, 0x0b, 0x22, 0xda, |
| 0x21, 0x2a, 0xc1, 0x37, 0x68, 0x28, 0xbc, 0x16, 0x89, 0xc2, 0x17, 0x35, 0xc1, 0x2a, 0x6b, 0xfb, |
| 0x42, 0xfe, 0xee, 0x04, 0x9e, 0xef, 0x1e, 0x61, 0xd6, 0xd9, 0x8a, 0x0e, 0xa2, 0x70, 0xd7, 0x38, |
| 0xa9, 0x46, 0x64, 0x4e, 0x95, 0xf9, 0x61, 0xbb, 0x45, 0x5d, 0x99, 0x50, 0x29, 0x9c, 0x8f, 0x0f, |
| 0xb6, 0xf4, 0x3e, 0x7a, 0x06, 0x99, 0x80, 0xb3, 0x3a, 0xa7, 0x61, 0x1c, 0x6a, 0x2b, 0xa1, 0x25, |
| 0xc7, 0x1a, 0x86, 0xbb, 0x04, 0xe8, 0x7b, 0x30, 0xe7, 0x85, 0x4e, 0x18, 0x34, 0x3d, 0x21, 0x53, |
| 0x50, 0x26, 0x59, 0x06, 0xcf, 0x7a, 0x61, 0xb9, 0xbb, 0x67, 0xbe, 0x80, 0x4c, 0x0c, 0x45, 0xcb, |
| 0x30, 0x4f, 0x84, 0xc3, 0x69, 0x18, 0x30, 0x3f, 0xa4, 0x4e, 0x28, 0x08, 0x57, 0x86, 0xa5, 0x70, |
| 0x8e, 0x08, 0xac, 0xf7, 0xcb, 0xd1, 0x36, 0xfa, 0x01, 0xe4, 0xfa, 0x65, 0xa9, 0x1f, 0x1b, 0x35, |
| 0xd7, 0x93, 0xdc, 0xf1, 0x5d, 0x73, 0x03, 0xb2, 0x95, 0x06, 0x67, 0x42, 0x34, 0xa9, 0x76, 0xe0, |
| 0x3b, 0x90, 0x17, 0x7a, 0xc7, 0x09, 0x28, 0xaf, 0x52, 0xed, 0xbd, 0x29, 0x9c, 0x8b, 0xf7, 0x8f, |
| 0xd5, 0xb6, 0xf9, 0x87, 0x09, 0xc8, 0xf7, 0x22, 0xae, 0x48, 0xd1, 0x21, 0x4c, 0xcb, 0x3b, 0x8d, |
| 0xb3, 0x8e, 0xaa, 0x9b, 0xe4, 0x4e, 0x8a, 0x6e, 0xb4, 0x88, 0x6b, 0xef, 0x0e, 0xce, 0x10, 0xfd, |
| 0x8d, 0xea, 0x80, 0xd4, 0x6d, 0xc6, 0x69, 0x95, 0x71, 0xd7, 0x39, 0x25, 0xa2, 0xda, 0x18, 0xb3, |
| 0xd0, 0xe4, 0x9d, 0x86, 0x25, 0x7e, 0x33, 0x82, 0xef, 0xdd, 0xc1, 0x79, 0x32, 0xb4, 0x87, 0x16, |
| 0x60, 0xba, 0x97, 0x2f, 0x69, 0x99, 0x2f, 0x19, 0x1e, 0xa7, 0xc9, 0xb3, 0x68, 0xd2, 0x88, 0xfc, |
| 0xa3, 0x9b, 0xcd, 0xe3, 0xb1, 0x32, 0x58, 0xb9, 0x16, 0x6b, 0x0a, 0xd9, 0xc2, 0x62, 0x17, 0x6b, |
| 0xd6, 0xa9, 0xf1, 0x5a, 0xd8, 0x40, 0xc8, 0x70, 0x56, 0x0c, 0xac, 0x37, 0xd3, 0x30, 0x19, 0x79, |
| 0xdf, 0xfc, 0xb3, 0x01, 0xdf, 0x97, 0xb6, 0x8d, 0xb4, 0x73, 0xdd, 0xbf, 0xe2, 0x32, 0x3d, 0x81, |
| 0xbb, 0xfa, 0x22, 0xd0, 0x05, 0x9a, 0x74, 0x58, 0xe8, 0xa3, 0x54, 0xd5, 0x19, 0xf3, 0xa0, 0x95, |
| 0xeb, 0xda, 0x68, 0xe4, 0xbb, 0x29, 0x25, 0x38, 0xd2, 0x4b, 0xcd, 0x00, 0x1e, 0xdc, 0xa0, 0xac, |
| 0xce, 0xb0, 0xbe, 0x31, 0xc4, 0xb8, 0xcd, 0x18, 0x62, 0xbe, 0x80, 0x6f, 0xee, 0x7a, 0x3e, 0x69, |
| 0x7a, 0xbf, 0xa6, 0xfa, 0x48, 0xfb, 0xa3, 0x37, 0x57, 0xa6, 0x6e, 0x31, 0x57, 0x9a, 0xbf, 0x35, |
| 0xe0, 0x9e, 0xac, 0x65, 0x69, 0xcc, 0xc0, 0x2f, 0x7c, 0x0a, 0x39, 0xc6, 0xbd, 0x7a, 0xf4, 0xeb, |
| 0xce, 0x6d, 0x46, 0xd8, 0x6c, 0xcc, 0xa2, 0xa7, 0xeb, 0x22, 0x64, 0xe2, 0xae, 0xa5, 0x0b, 0xbe, |
| 0xbb, 0x36, 0xff, 0x6e, 0xc0, 0xfd, 0x11, 0x75, 0xb4, 0x4f, 0x2b, 0x90, 0x0d, 0xb8, 0xd7, 0x22, |
| 0xfc, 0xfc, 0x56, 0xea, 0xcc, 0x69, 0x12, 0xad, 0xcd, 0xcf, 0x20, 0xcf, 0x69, 0x8b, 0x78, 0xbe, |
| 0x4b, 0xb9, 0x73, 0x1b, 0x8f, 0xe6, 0xba, 0x34, 0x6a, 0x63, 0xf9, 0x43, 0x80, 0xde, 0x3d, 0x87, |
| 0x16, 0xe0, 0xfe, 0xb6, 0x5d, 0xb1, 0x9d, 0xdd, 0x23, 0xfc, 0xb1, 0x5d, 0x71, 0x3e, 0x39, 0x2c, |
| 0x1f, 0xef, 0x6c, 0xed, 0xef, 0xee, 0xef, 0x6c, 0xe7, 0xef, 0xa0, 0x0c, 0x4c, 0xda, 0x9f, 0xe2, |
| 0xa3, 0xbc, 0x81, 0xa6, 0x61, 0xca, 0xc6, 0xf8, 0xe8, 0xb3, 0xfc, 0xc4, 0xf2, 0x11, 0xe4, 0x87, |
| 0x6f, 0x34, 0xf4, 0x5d, 0x78, 0xbb, 0xbc, 0x67, 0xe3, 0xed, 0xfd, 0xc3, 0xa7, 0x4e, 0xb9, 0x82, |
| 0xed, 0xca, 0xce, 0xd3, 0x9f, 0x0f, 0x71, 0x01, 0xa4, 0x0f, 0xf6, 0x4f, 0x3e, 0xd9, 0xdf, 0xce, |
| 0x1b, 0x68, 0x16, 0x32, 0x9b, 0xf6, 0x81, 0x7d, 0xb8, 0xb5, 0xb3, 0x9d, 0x4f, 0xad, 0xfd, 0x0f, |
| 0x20, 0xb7, 0xe9, 0xd5, 0x4f, 0x22, 0x2b, 0xca, 0xca, 0x08, 0xf4, 0xd7, 0x14, 0xcc, 0x8f, 0x64, |
| 0x33, 0xfa, 0x28, 0xa1, 0xe9, 0x2f, 0x9b, 0xc1, 0x8a, 0xaf, 0x51, 0x9c, 0xe6, 0x9f, 0x8c, 0x2b, |
| 0x7b, 0x69, 0x68, 0x72, 0x7b, 0xa8, 0x26, 0xaa, 0x87, 0x23, 0x75, 0xfa, 0xc5, 0xbf, 0xff, 0xfb, |
| 0xfb, 0x54, 0xc7, 0x5c, 0xe9, 0x3e, 0x29, 0x2f, 0xae, 0x79, 0x53, 0x46, 0xe3, 0xa5, 0xe3, 0xb9, |
| 0x1f, 0x74, 0x67, 0xce, 0xe5, 0xcb, 0x75, 0x63, 0xf9, 0xf9, 0x4f, 0xcd, 0x8d, 0x97, 0xc3, 0x5c, |
| 0x22, 0x48, 0x48, 0x87, 0x60, 0x96, 0xde, 0xd5, 0x0c, 0xe8, 0x9f, 0x06, 0x64, 0xe2, 0x5b, 0x05, |
| 0xbd, 0x37, 0x86, 0xad, 0x7d, 0x83, 0x47, 0xf1, 0xc9, 0xd8, 0x38, 0x55, 0x08, 0xe6, 0xc9, 0x95, |
| 0x3d, 0x38, 0xb2, 0x48, 0x67, 0x6c, 0xa0, 0x1f, 0xf7, 0xac, 0x1a, 0x38, 0x2e, 0x29, 0x9f, 0x95, |
| 0xa2, 0xc7, 0x60, 0xbf, 0x51, 0xf1, 0xeb, 0x75, 0xf9, 0x72, 0xc5, 0x40, 0xbf, 0x4b, 0xc1, 0xdb, |
| 0xaf, 0xec, 0x6d, 0xe8, 0x59, 0x42, 0x7d, 0x93, 0xb4, 0xf3, 0xe2, 0xc1, 0x9b, 0x21, 0xd3, 0x1e, |
| 0xa9, 0x5c, 0xd9, 0x6f, 0xe9, 0xae, 0xfe, 0x92, 0x54, 0x59, 0x33, 0x1f, 0xf5, 0xbc, 0xa3, 0x45, |
| 0x47, 0x1d, 0x12, 0xbf, 0x39, 0x54, 0x94, 0xff, 0x68, 0x40, 0x76, 0xb0, 0xf9, 0xa2, 0x9f, 0x24, |
| 0x54, 0xfb, 0xda, 0x9e, 0x5d, 0xbc, 0x37, 0xf2, 0xe6, 0xde, 0x69, 0x05, 0xe2, 0xdc, 0xfc, 0xe8, |
| 0xca, 0xd6, 0xfd, 0x58, 0xea, 0xba, 0x62, 0xbe, 0xdb, 0xa7, 0xeb, 0x4d, 0xb1, 0x8b, 0x34, 0xfd, |
| 0xd2, 0x80, 0xdc, 0x50, 0xdb, 0x44, 0x1f, 0x24, 0x6d, 0x5f, 0xd7, 0x76, 0xff, 0xe2, 0x87, 0xaf, |
| 0x0b, 0xd7, 0x21, 0x39, 0xbc, 0xb2, 0x87, 0xaf, 0x0f, 0x69, 0xdc, 0x7b, 0xe8, 0x87, 0x3d, 0xe3, |
| 0x86, 0x04, 0x5e, 0x61, 0x65, 0xf1, 0x2f, 0xc6, 0x3f, 0xec, 0x6f, 0xbf, 0xfa, 0xa1, 0xf9, 0x2f, |
| 0xfb, 0x37, 0x46, 0x43, 0x88, 0x20, 0x5c, 0xb7, 0xac, 0x4e, 0xa7, 0x33, 0xfc, 0x0c, 0x25, 0x6d, |
| 0xd1, 0xe8, 0xfe, 0x25, 0xf5, 0x30, 0xa9, 0x60, 0x29, 0x2a, 0x25, 0xe6, 0x37, 0x6f, 0x46, 0x48, |
| 0x47, 0x3d, 0x0a, 0x9a, 0x44, 0x44, 0x4f, 0x9a, 0xcd, 0x73, 0x78, 0xa7, 0xca, 0x5a, 0xc9, 0xbc, |
| 0xf8, 0xfc, 0x40, 0x8b, 0xd5, 0x59, 0x93, 0xf8, 0xf5, 0x12, 0xe3, 0x75, 0xab, 0x4e, 0x7d, 0x99, |
| 0x26, 0x56, 0xef, 0xb7, 0x6e, 0xf8, 0x5b, 0x6d, 0x43, 0xaf, 0x4f, 0xd3, 0x12, 0xf8, 0xf8, 0xeb, |
| 0x00, 0x00, 0x00, 0xff, 0xff, 0xa1, 0xa5, 0xc4, 0x46, 0xfc, 0x14, 0x00, 0x00, |
| } |
| |
| // Reference imports to suppress errors if they are not otherwise used. |
| var _ context.Context |
| var _ grpc.ClientConnInterface |
| |
| // This is a compile-time assertion to ensure that this generated file |
| // is compatible with the grpc package it is being compiled against. |
| const _ = grpc.SupportPackageIsVersion6 |
| |
| // BigQueryStorageClient is the client API for BigQueryStorage service. |
| // |
| // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. |
| type BigQueryStorageClient interface { |
| // Creates a new read session. A read session divides the contents of a |
| // BigQuery table into one or more streams, which can then be used to read |
| // data from the table. The read session also specifies properties of the |
| // data to be read, such as a list of columns or a push-down filter describing |
| // the rows to be returned. |
| // |
| // A particular row can be read by at most one stream. When the caller has |
| // reached the end of each stream in the session, then all the data in the |
| // table has been read. |
| // |
| // Read sessions automatically expire 24 hours after they are created and do |
| // not require manual clean-up by the caller. |
| CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error) |
| // Reads rows from the table in the format prescribed by the read session. |
| // Each response contains one or more table rows, up to a maximum of 10 MiB |
| // per response; read requests which attempt to read individual rows larger |
| // than this will fail. |
| // |
| // Each request also returns a set of stream statistics reflecting the |
| // estimated total number of rows in the read stream. This number is computed |
| // based on the total table size and the number of active streams in the read |
| // session, and may change as other streams continue to read data. |
| ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryStorage_ReadRowsClient, error) |
| // Creates additional streams for a ReadSession. This API can be used to |
| // dynamically adjust the parallelism of a batch processing task upwards by |
| // adding additional workers. |
| BatchCreateReadSessionStreams(ctx context.Context, in *BatchCreateReadSessionStreamsRequest, opts ...grpc.CallOption) (*BatchCreateReadSessionStreamsResponse, error) |
| // Triggers the graceful termination of a single stream in a ReadSession. This |
| // API can be used to dynamically adjust the parallelism of a batch processing |
| // task downwards without losing data. |
| // |
| // This API does not delete the stream -- it remains visible in the |
| // ReadSession, and any data processed by the stream is not released to other |
| // streams. However, no additional data will be assigned to the stream once |
| // this call completes. Callers must continue reading data on the stream until |
| // the end of the stream is reached so that data which has already been |
| // assigned to the stream will be processed. |
| // |
| // This method will return an error if there are no other live streams |
| // in the Session, or if SplitReadStream() has been called on the given |
| // Stream. |
| FinalizeStream(ctx context.Context, in *FinalizeStreamRequest, opts ...grpc.CallOption) (*empty.Empty, error) |
| // Splits a given read stream into two Streams. These streams are referred to |
| // as the primary and the residual of the split. The original stream can still |
| // be read from in the same manner as before. Both of the returned streams can |
// also be read from, and the total rows returned by both child streams will be
| // the same as the rows read from the original stream. |
| // |
| // Moreover, the two child streams will be allocated back to back in the |
// original Stream. Concretely, it is guaranteed that for streams Original,
// Primary, and Residual, Original[0-j] = Primary[0-j] and
| // Original[j-n] = Residual[0-m] once the streams have been read to |
| // completion. |
| // |
| // This method is guaranteed to be idempotent. |
| SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error) |
| } |
| |
| type bigQueryStorageClient struct { |
| cc grpc.ClientConnInterface |
| } |
| |
| func NewBigQueryStorageClient(cc grpc.ClientConnInterface) BigQueryStorageClient { |
| return &bigQueryStorageClient{cc} |
| } |
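
// readFirstStream is an illustrative sketch, not generated code: it walks
// the typical call sequence against this client (create a read session,
// then stream rows from its first stream). It assumes conn has already been
// dialed and authenticated; the parent string is a placeholder.
func readFirstStream(ctx context.Context, conn grpc.ClientConnInterface, table *TableReference) error {
	client := NewBigQueryStorageClient(conn)
	session, err := client.CreateReadSession(ctx, &CreateReadSessionRequest{
		Parent:           "projects/my-project", // hypothetical project
		TableReference:   table,
		RequestedStreams: 1,
	})
	if err != nil {
		return err
	}
	if len(session.GetStreams()) == 0 {
		return fmt.Errorf("no streams in session %q", session.GetName())
	}
	rows, err := client.ReadRows(ctx, &ReadRowsRequest{
		ReadPosition: &StreamPosition{Stream: session.GetStreams()[0]},
	})
	if err != nil {
		return err
	}
	for {
		resp, err := rows.Recv()
		if err != nil {
			// Recv returns io.EOF once the stream is fully consumed;
			// callers typically treat that case as success.
			return err
		}
		_ = resp.GetRowCount() // row processing would go here
	}
}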
| |
| func (c *bigQueryStorageClient) CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error) { |
| out := new(ReadSession) |
| err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *bigQueryStorageClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryStorage_ReadRowsClient, error) { |
| stream, err := c.cc.NewStream(ctx, &_BigQueryStorage_serviceDesc.Streams[0], "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows", opts...) |
| if err != nil { |
| return nil, err |
| } |
| x := &bigQueryStorageReadRowsClient{stream} |
| if err := x.ClientStream.SendMsg(in); err != nil { |
| return nil, err |
| } |
| if err := x.ClientStream.CloseSend(); err != nil { |
| return nil, err |
| } |
| return x, nil |
| } |
| |
| type BigQueryStorage_ReadRowsClient interface { |
| Recv() (*ReadRowsResponse, error) |
| grpc.ClientStream |
| } |
| |
| type bigQueryStorageReadRowsClient struct { |
| grpc.ClientStream |
| } |
| |
| func (x *bigQueryStorageReadRowsClient) Recv() (*ReadRowsResponse, error) { |
| m := new(ReadRowsResponse) |
| if err := x.ClientStream.RecvMsg(m); err != nil { |
| return nil, err |
| } |
| return m, nil |
| } |
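| 
| // A sketch (not part of the generated API) of draining a ReadRows stream.
| // Recv returns io.EOF once the server closes the stream normally; because
| // this file does not import "io", the helper returns the terminal error
| // as-is and leaves it to the caller to treat io.EOF as clean completion.
| func exampleDrainReadRows(ctx context.Context, client BigQueryStorageClient, req *ReadRowsRequest) error {
| 	stream, err := client.ReadRows(ctx, req)
| 	if err != nil {
| 		return err
| 	}
| 	for {
| 		resp, err := stream.Recv()
| 		if err != nil {
| 			return err // io.EOF here means the stream ended cleanly.
| 		}
| 		// Process resp; its payload format (Avro or Arrow) matches the
| 		// DataFormat requested when the read session was created.
| 		_ = resp
| 	}
| }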
| |
| func (c *bigQueryStorageClient) BatchCreateReadSessionStreams(ctx context.Context, in *BatchCreateReadSessionStreamsRequest, opts ...grpc.CallOption) (*BatchCreateReadSessionStreamsResponse, error) { |
| out := new(BatchCreateReadSessionStreamsResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *bigQueryStorageClient) FinalizeStream(ctx context.Context, in *FinalizeStreamRequest, opts ...grpc.CallOption) (*empty.Empty, error) { |
| out := new(empty.Empty) |
| err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
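| 
| // Sketch of the downscaling pattern described in the FinalizeStream
| // documentation: finalize the stream, then keep reading it to the end so
| // already-assigned data is not lost. The FinalizeStreamRequest field name
| // (Stream) follows the v1beta1 proto and is an assumption here.
| func exampleFinalizeStream(ctx context.Context, client BigQueryStorageClient, s *Stream) error {
| 	if _, err := client.FinalizeStream(ctx, &FinalizeStreamRequest{Stream: s}); err != nil {
| 		return err
| 	}
| 	// Finalization only stops new data from being assigned; the caller
| 	// must still drain the stream via ReadRows until end of stream.
| 	return nil
| }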
| |
| func (c *bigQueryStorageClient) SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error) { |
| out := new(SplitReadStreamResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
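| 
| // Sketch of splitting a stream to raise parallelism. The request and
| // response field names used here (OriginalStream, Fraction, PrimaryStream,
| // RemainderStream) follow the v1beta1 proto and should be treated as
| // assumptions.
| func exampleSplitReadStream(ctx context.Context, client BigQueryStorageClient, s *Stream) (primary, residual *Stream, err error) {
| 	resp, err := client.SplitReadStream(ctx, &SplitReadStreamRequest{
| 		OriginalStream: s,
| 		// Fraction hints where to split; 0.5 asks for a roughly even
| 		// primary/residual division.
| 		Fraction: 0.5,
| 	})
| 	if err != nil {
| 		return nil, nil, err
| 	}
| 	return resp.PrimaryStream, resp.RemainderStream, nil
| }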
| |
| // BigQueryStorageServer is the server API for the BigQueryStorage service.
| type BigQueryStorageServer interface { |
| // Creates a new read session. A read session divides the contents of a |
| // BigQuery table into one or more streams, which can then be used to read |
| // data from the table. The read session also specifies properties of the |
| // data to be read, such as a list of columns or a push-down filter describing |
| // the rows to be returned. |
| // |
| // A particular row can be read by at most one stream. When the caller has |
| // reached the end of each stream in the session, then all the data in the |
| // table has been read. |
| // |
| // Read sessions automatically expire 24 hours after they are created and do |
| // not require manual clean-up by the caller. |
| CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error) |
| // Reads rows from the table in the format prescribed by the read session. |
| // Each response contains one or more table rows, up to a maximum of 10 MiB |
| // per response; read requests which attempt to read individual rows larger |
| // than this will fail. |
| // |
| // Each request also returns a set of stream statistics reflecting the |
| // estimated total number of rows in the read stream. This number is computed |
| // based on the total table size and the number of active streams in the read |
| // session, and may change as other streams continue to read data. |
| ReadRows(*ReadRowsRequest, BigQueryStorage_ReadRowsServer) error |
| // Creates additional streams for a ReadSession. This API can be used to |
| // dynamically adjust the parallelism of a batch processing task upwards by |
| // adding additional workers. |
| BatchCreateReadSessionStreams(context.Context, *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error) |
| // Triggers the graceful termination of a single stream in a ReadSession. This |
| // API can be used to dynamically adjust the parallelism of a batch processing |
| // task downwards without losing data. |
| // |
| // This API does not delete the stream -- it remains visible in the |
| // ReadSession, and any data processed by the stream is not released to other |
| // streams. However, no additional data will be assigned to the stream once |
| // this call completes. Callers must continue reading data on the stream until |
| // the end of the stream is reached so that data which has already been |
| // assigned to the stream will be processed. |
| // |
| // This method will return an error if there are no other live streams |
| // in the Session, or if SplitReadStream() has been called on the given |
| // Stream. |
| FinalizeStream(context.Context, *FinalizeStreamRequest) (*empty.Empty, error) |
| // Splits a given read stream into two Streams. These streams are referred to |
| // as the primary and the residual of the split. The original stream can still |
| // be read from in the same manner as before. Both of the returned streams can |
| // also be read from, and the total rows returned by both child streams will be
| // the same as the rows read from the original stream. |
| // |
| // Moreover, the two child streams will be allocated back to back in the |
| // original Stream. Concretely, it is guaranteed that for streams Original, |
| // Primary, and Residual, Original[0-j] = Primary[0-j] and
| // Original[j-n] = Residual[0-m] once the streams have been read to |
| // completion. |
| // |
| // This method is guaranteed to be idempotent. |
| SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error) |
| } |
| |
| // UnimplementedBigQueryStorageServer can be embedded to have forward compatible implementations. |
| type UnimplementedBigQueryStorageServer struct { |
| } |
| |
| func (*UnimplementedBigQueryStorageServer) CreateReadSession(ctx context.Context, req *CreateReadSessionRequest) (*ReadSession, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method CreateReadSession not implemented") |
| } |
| func (*UnimplementedBigQueryStorageServer) ReadRows(req *ReadRowsRequest, srv BigQueryStorage_ReadRowsServer) error { |
| return status.Errorf(codes.Unimplemented, "method ReadRows not implemented") |
| } |
| func (*UnimplementedBigQueryStorageServer) BatchCreateReadSessionStreams(ctx context.Context, req *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method BatchCreateReadSessionStreams not implemented") |
| } |
| func (*UnimplementedBigQueryStorageServer) FinalizeStream(ctx context.Context, req *FinalizeStreamRequest) (*empty.Empty, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method FinalizeStream not implemented") |
| } |
| func (*UnimplementedBigQueryStorageServer) SplitReadStream(ctx context.Context, req *SplitReadStreamRequest) (*SplitReadStreamResponse, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method SplitReadStream not implemented") |
| } |
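| 
| // A minimal server sketch (not generated): embed
| // UnimplementedBigQueryStorageServer for forward compatibility and override
| // only the methods you actually serve. Register it with
| // RegisterBigQueryStorageServer(s, &exampleStorageServer{}); the pointer
| // matters because the embedded methods have pointer receivers.
| type exampleStorageServer struct {
| 	UnimplementedBigQueryStorageServer
| }
| 
| func (*exampleStorageServer) ReadRows(req *ReadRowsRequest, srv BigQueryStorage_ReadRowsServer) error {
| 	// Send a single empty response block and end the stream; a real
| 	// implementation would page through the requested stream's data.
| 	return srv.Send(&ReadRowsResponse{})
| }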
| |
| func RegisterBigQueryStorageServer(s *grpc.Server, srv BigQueryStorageServer) { |
| s.RegisterService(&_BigQueryStorage_serviceDesc, srv) |
| } |
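| 
| // Sketch of wiring an implementation into a grpc.Server. Binding a
| // net.Listener and calling Serve is left to the caller, since this file
| // does not import "net".
| func exampleServe(impl BigQueryStorageServer) *grpc.Server {
| 	s := grpc.NewServer()
| 	RegisterBigQueryStorageServer(s, impl)
| 	// Callers then do: lis, _ := net.Listen("tcp", addr); s.Serve(lis)
| 	return s
| }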
| |
| func _BigQueryStorage_CreateReadSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(CreateReadSessionRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(BigQueryStorageServer).CreateReadSession(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(BigQueryStorageServer).CreateReadSession(ctx, req.(*CreateReadSessionRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _BigQueryStorage_ReadRows_Handler(srv interface{}, stream grpc.ServerStream) error { |
| m := new(ReadRowsRequest) |
| if err := stream.RecvMsg(m); err != nil { |
| return err |
| } |
| return srv.(BigQueryStorageServer).ReadRows(m, &bigQueryStorageReadRowsServer{stream}) |
| } |
| |
| type BigQueryStorage_ReadRowsServer interface { |
| Send(*ReadRowsResponse) error |
| grpc.ServerStream |
| } |
| |
| type bigQueryStorageReadRowsServer struct { |
| grpc.ServerStream |
| } |
| |
| func (x *bigQueryStorageReadRowsServer) Send(m *ReadRowsResponse) error { |
| return x.ServerStream.SendMsg(m) |
| } |
| |
| func _BigQueryStorage_BatchCreateReadSessionStreams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(BatchCreateReadSessionStreamsRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(BigQueryStorageServer).BatchCreateReadSessionStreams(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(BigQueryStorageServer).BatchCreateReadSessionStreams(ctx, req.(*BatchCreateReadSessionStreamsRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _BigQueryStorage_FinalizeStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(FinalizeStreamRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(BigQueryStorageServer).FinalizeStream(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(BigQueryStorageServer).FinalizeStream(ctx, req.(*FinalizeStreamRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _BigQueryStorage_SplitReadStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(SplitReadStreamRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(BigQueryStorageServer).SplitReadStream(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(BigQueryStorageServer).SplitReadStream(ctx, req.(*SplitReadStreamRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
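| 
| // The unary handlers above thread an optional grpc.UnaryServerInterceptor
| // around each method. A minimal sketch of such an interceptor, installed
| // via grpc.NewServer(grpc.UnaryInterceptor(exampleUnaryInterceptor)):
| func exampleUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
| 	// info.FullMethod carries the full RPC path, e.g.
| 	// "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession".
| 	fmt.Printf("rpc %s\n", info.FullMethod)
| 	return handler(ctx, req)
| }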
| |
| var _BigQueryStorage_serviceDesc = grpc.ServiceDesc{ |
| ServiceName: "google.cloud.bigquery.storage.v1beta1.BigQueryStorage", |
| HandlerType: (*BigQueryStorageServer)(nil), |
| Methods: []grpc.MethodDesc{ |
| { |
| MethodName: "CreateReadSession", |
| Handler: _BigQueryStorage_CreateReadSession_Handler, |
| }, |
| { |
| MethodName: "BatchCreateReadSessionStreams", |
| Handler: _BigQueryStorage_BatchCreateReadSessionStreams_Handler, |
| }, |
| { |
| MethodName: "FinalizeStream", |
| Handler: _BigQueryStorage_FinalizeStream_Handler, |
| }, |
| { |
| MethodName: "SplitReadStream", |
| Handler: _BigQueryStorage_SplitReadStream_Handler, |
| }, |
| }, |
| Streams: []grpc.StreamDesc{ |
| { |
| StreamName: "ReadRows", |
| Handler: _BigQueryStorage_ReadRows_Handler, |
| ServerStreams: true, |
| }, |
| }, |
| Metadata: "google/cloud/bigquery/storage/v1beta1/storage.proto", |
| } |