| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/spanner/v1/spanner.proto |
| |
| package spanner |
| |
| import ( |
| context "context" |
| fmt "fmt" |
| math "math" |
| |
| proto "github.com/golang/protobuf/proto" |
| empty "github.com/golang/protobuf/ptypes/empty" |
| _struct "github.com/golang/protobuf/ptypes/struct" |
| timestamp "github.com/golang/protobuf/ptypes/timestamp" |
| _ "google.golang.org/genproto/googleapis/api/annotations" |
| status "google.golang.org/genproto/googleapis/rpc/status" |
| grpc "google.golang.org/grpc" |
| codes "google.golang.org/grpc/codes" |
| status1 "google.golang.org/grpc/status" |
| ) |
| |
// Reference imports to suppress errors if they are not otherwise used.
// These are generated guards; do not remove them by hand.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
| |
// Mode in which the statement must be processed.
type ExecuteSqlRequest_QueryMode int32

const (
	// The default mode. Only the statement results are returned.
	ExecuteSqlRequest_NORMAL ExecuteSqlRequest_QueryMode = 0
	// This mode returns only the query plan, without any results or
	// execution statistics information.
	ExecuteSqlRequest_PLAN ExecuteSqlRequest_QueryMode = 1
	// This mode returns both the query plan and the execution statistics along
	// with the results.
	ExecuteSqlRequest_PROFILE ExecuteSqlRequest_QueryMode = 2
)

// ExecuteSqlRequest_QueryMode_name maps each enum number to its proto name;
// it is used by String (via proto.EnumName).
var ExecuteSqlRequest_QueryMode_name = map[int32]string{
	0: "NORMAL",
	1: "PLAN",
	2: "PROFILE",
}

// ExecuteSqlRequest_QueryMode_value is the inverse mapping, from proto name
// back to enum number.
var ExecuteSqlRequest_QueryMode_value = map[string]int32{
	"NORMAL":  0,
	"PLAN":    1,
	"PROFILE": 2,
}
| |
// String returns the proto name of the enum value (e.g. "NORMAL"),
// looked up through the generated name map.
func (x ExecuteSqlRequest_QueryMode) String() string {
	return proto.EnumName(ExecuteSqlRequest_QueryMode_name, int32(x))
}

// EnumDescriptor returns the raw file descriptor bytes and the index path
// ([]int{8, 0}: message 8, nested enum 0) identifying this enum within it.
func (ExecuteSqlRequest_QueryMode) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{8, 0}
}
| |
// The request for [CreateSession][google.spanner.v1.Spanner.CreateSession].
type CreateSessionRequest struct {
	// Required. The database in which the new session is created.
	Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
	// The session to create.
	Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"`
	// Internal protobuf bookkeeping (unknown fields and size cache); not part
	// of the message schema. Do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero value.
func (m *CreateSessionRequest) Reset() { *m = CreateSessionRequest{} }

// String renders the message in the proto compact text format.
func (m *CreateSessionRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a generated proto message.
func (*CreateSessionRequest) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index of this
// message within it.
func (*CreateSessionRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{0}
}

// The XXX_* methods below delegate wire (un)marshaling, merging, sizing, and
// unknown-field handling to the proto runtime via the cached message info.
func (m *CreateSessionRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateSessionRequest.Unmarshal(m, b)
}
func (m *CreateSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateSessionRequest.Marshal(b, m, deterministic)
}
func (m *CreateSessionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateSessionRequest.Merge(m, src)
}
func (m *CreateSessionRequest) XXX_Size() int {
	return xxx_messageInfo_CreateSessionRequest.Size(m)
}
func (m *CreateSessionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateSessionRequest.DiscardUnknown(m)
}

// xxx_messageInfo_CreateSessionRequest caches proto runtime state for this type.
var xxx_messageInfo_CreateSessionRequest proto.InternalMessageInfo
| |
| func (m *CreateSessionRequest) GetDatabase() string { |
| if m != nil { |
| return m.Database |
| } |
| return "" |
| } |
| |
| func (m *CreateSessionRequest) GetSession() *Session { |
| if m != nil { |
| return m.Session |
| } |
| return nil |
| } |
| |
// The request for
// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
type BatchCreateSessionsRequest struct {
	// Required. The database in which the new sessions are created.
	Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
	// Parameters to be applied to each created session.
	SessionTemplate *Session `protobuf:"bytes,2,opt,name=session_template,json=sessionTemplate,proto3" json:"session_template,omitempty"`
	// Required. The number of sessions to be created in this batch call.
	// The API may return fewer than the requested number of sessions. If a
	// specific number of sessions are desired, the client can make additional
	// calls to BatchCreateSessions (adjusting
	// [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
	// as necessary).
	SessionCount int32 `protobuf:"varint,3,opt,name=session_count,json=sessionCount,proto3" json:"session_count,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *BatchCreateSessionsRequest) Reset() { *m = BatchCreateSessionsRequest{} }
func (m *BatchCreateSessionsRequest) String() string { return proto.CompactTextString(m) }
func (*BatchCreateSessionsRequest) ProtoMessage() {}
func (*BatchCreateSessionsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{1}
}

func (m *BatchCreateSessionsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BatchCreateSessionsRequest.Unmarshal(m, b)
}
func (m *BatchCreateSessionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BatchCreateSessionsRequest.Marshal(b, m, deterministic)
}
func (m *BatchCreateSessionsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchCreateSessionsRequest.Merge(m, src)
}
func (m *BatchCreateSessionsRequest) XXX_Size() int {
	return xxx_messageInfo_BatchCreateSessionsRequest.Size(m)
}
func (m *BatchCreateSessionsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchCreateSessionsRequest.DiscardUnknown(m)
}

// xxx_messageInfo_BatchCreateSessionsRequest caches proto runtime state.
var xxx_messageInfo_BatchCreateSessionsRequest proto.InternalMessageInfo
| |
| func (m *BatchCreateSessionsRequest) GetDatabase() string { |
| if m != nil { |
| return m.Database |
| } |
| return "" |
| } |
| |
| func (m *BatchCreateSessionsRequest) GetSessionTemplate() *Session { |
| if m != nil { |
| return m.SessionTemplate |
| } |
| return nil |
| } |
| |
| func (m *BatchCreateSessionsRequest) GetSessionCount() int32 { |
| if m != nil { |
| return m.SessionCount |
| } |
| return 0 |
| } |
| |
// The response for
// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
type BatchCreateSessionsResponse struct {
	// The freshly created sessions.
	Session []*Session `protobuf:"bytes,1,rep,name=session,proto3" json:"session,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *BatchCreateSessionsResponse) Reset() { *m = BatchCreateSessionsResponse{} }
func (m *BatchCreateSessionsResponse) String() string { return proto.CompactTextString(m) }
func (*BatchCreateSessionsResponse) ProtoMessage() {}
func (*BatchCreateSessionsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{2}
}

func (m *BatchCreateSessionsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BatchCreateSessionsResponse.Unmarshal(m, b)
}
func (m *BatchCreateSessionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BatchCreateSessionsResponse.Marshal(b, m, deterministic)
}
func (m *BatchCreateSessionsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchCreateSessionsResponse.Merge(m, src)
}
func (m *BatchCreateSessionsResponse) XXX_Size() int {
	return xxx_messageInfo_BatchCreateSessionsResponse.Size(m)
}
func (m *BatchCreateSessionsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchCreateSessionsResponse.DiscardUnknown(m)
}

// xxx_messageInfo_BatchCreateSessionsResponse caches proto runtime state.
var xxx_messageInfo_BatchCreateSessionsResponse proto.InternalMessageInfo
| |
| func (m *BatchCreateSessionsResponse) GetSession() []*Session { |
| if m != nil { |
| return m.Session |
| } |
| return nil |
| } |
| |
// A session in the Cloud Spanner API.
type Session struct {
	// The name of the session. This is always system-assigned; values provided
	// when creating a session are ignored.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The labels for the session.
	//
	// * Label keys must be between 1 and 63 characters long and must conform to
	// the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
	// * Label values must be between 0 and 63 characters long and must conform
	// to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
	// * No more than 64 labels can be associated with a given session.
	//
	// See https://goo.gl/xmQnxf for more information on and examples of labels.
	Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Output only. The timestamp when the session is created.
	CreateTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// Output only. The approximate timestamp when the session is last used. It is
	// typically earlier than the actual last use time.
	ApproximateLastUseTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=approximate_last_use_time,json=approximateLastUseTime,proto3" json:"approximate_last_use_time,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *Session) Reset() { *m = Session{} }
func (m *Session) String() string { return proto.CompactTextString(m) }
func (*Session) ProtoMessage() {}
func (*Session) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{3}
}

func (m *Session) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Session.Unmarshal(m, b)
}
func (m *Session) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Session.Marshal(b, m, deterministic)
}
func (m *Session) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Session.Merge(m, src)
}
func (m *Session) XXX_Size() int {
	return xxx_messageInfo_Session.Size(m)
}
func (m *Session) XXX_DiscardUnknown() {
	xxx_messageInfo_Session.DiscardUnknown(m)
}

// xxx_messageInfo_Session caches proto runtime state for this type.
var xxx_messageInfo_Session proto.InternalMessageInfo
| |
| func (m *Session) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
| func (m *Session) GetLabels() map[string]string { |
| if m != nil { |
| return m.Labels |
| } |
| return nil |
| } |
| |
| func (m *Session) GetCreateTime() *timestamp.Timestamp { |
| if m != nil { |
| return m.CreateTime |
| } |
| return nil |
| } |
| |
| func (m *Session) GetApproximateLastUseTime() *timestamp.Timestamp { |
| if m != nil { |
| return m.ApproximateLastUseTime |
| } |
| return nil |
| } |
| |
// The request for [GetSession][google.spanner.v1.Spanner.GetSession].
type GetSessionRequest struct {
	// Required. The name of the session to retrieve.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *GetSessionRequest) Reset() { *m = GetSessionRequest{} }
func (m *GetSessionRequest) String() string { return proto.CompactTextString(m) }
func (*GetSessionRequest) ProtoMessage() {}
func (*GetSessionRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{4}
}

func (m *GetSessionRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetSessionRequest.Unmarshal(m, b)
}
func (m *GetSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetSessionRequest.Marshal(b, m, deterministic)
}
func (m *GetSessionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetSessionRequest.Merge(m, src)
}
func (m *GetSessionRequest) XXX_Size() int {
	return xxx_messageInfo_GetSessionRequest.Size(m)
}
func (m *GetSessionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetSessionRequest.DiscardUnknown(m)
}

// xxx_messageInfo_GetSessionRequest caches proto runtime state for this type.
var xxx_messageInfo_GetSessionRequest proto.InternalMessageInfo
| |
| func (m *GetSessionRequest) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
// The request for [ListSessions][google.spanner.v1.Spanner.ListSessions].
type ListSessionsRequest struct {
	// Required. The database in which to list sessions.
	Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
	// Number of sessions to be returned in the response. If 0 or less, defaults
	// to the server's maximum allowed page size.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// If non-empty, `page_token` should contain a
	// [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
	// from a previous
	// [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// An expression for filtering the results of the request. Filter rules are
	// case insensitive. The fields eligible for filtering are:
	//
	// * `labels.key` where key is the name of a label
	//
	// Some examples of using filters are:
	//
	// * `labels.env:*` --> The session has the label "env".
	// * `labels.env:dev` --> The session has the label "env" and the value of
	// the label contains the string "dev".
	Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *ListSessionsRequest) Reset() { *m = ListSessionsRequest{} }
func (m *ListSessionsRequest) String() string { return proto.CompactTextString(m) }
func (*ListSessionsRequest) ProtoMessage() {}
func (*ListSessionsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{5}
}

func (m *ListSessionsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListSessionsRequest.Unmarshal(m, b)
}
func (m *ListSessionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListSessionsRequest.Marshal(b, m, deterministic)
}
func (m *ListSessionsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListSessionsRequest.Merge(m, src)
}
func (m *ListSessionsRequest) XXX_Size() int {
	return xxx_messageInfo_ListSessionsRequest.Size(m)
}
func (m *ListSessionsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ListSessionsRequest.DiscardUnknown(m)
}

// xxx_messageInfo_ListSessionsRequest caches proto runtime state for this type.
var xxx_messageInfo_ListSessionsRequest proto.InternalMessageInfo
| |
| func (m *ListSessionsRequest) GetDatabase() string { |
| if m != nil { |
| return m.Database |
| } |
| return "" |
| } |
| |
| func (m *ListSessionsRequest) GetPageSize() int32 { |
| if m != nil { |
| return m.PageSize |
| } |
| return 0 |
| } |
| |
| func (m *ListSessionsRequest) GetPageToken() string { |
| if m != nil { |
| return m.PageToken |
| } |
| return "" |
| } |
| |
| func (m *ListSessionsRequest) GetFilter() string { |
| if m != nil { |
| return m.Filter |
| } |
| return "" |
| } |
| |
// The response for [ListSessions][google.spanner.v1.Spanner.ListSessions].
type ListSessionsResponse struct {
	// The list of requested sessions.
	Sessions []*Session `protobuf:"bytes,1,rep,name=sessions,proto3" json:"sessions,omitempty"`
	// `next_page_token` can be sent in a subsequent
	// [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
	// of the matching sessions.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *ListSessionsResponse) Reset() { *m = ListSessionsResponse{} }
func (m *ListSessionsResponse) String() string { return proto.CompactTextString(m) }
func (*ListSessionsResponse) ProtoMessage() {}
func (*ListSessionsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{6}
}

func (m *ListSessionsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListSessionsResponse.Unmarshal(m, b)
}
func (m *ListSessionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListSessionsResponse.Marshal(b, m, deterministic)
}
func (m *ListSessionsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListSessionsResponse.Merge(m, src)
}
func (m *ListSessionsResponse) XXX_Size() int {
	return xxx_messageInfo_ListSessionsResponse.Size(m)
}
func (m *ListSessionsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ListSessionsResponse.DiscardUnknown(m)
}

// xxx_messageInfo_ListSessionsResponse caches proto runtime state for this type.
var xxx_messageInfo_ListSessionsResponse proto.InternalMessageInfo
| |
| func (m *ListSessionsResponse) GetSessions() []*Session { |
| if m != nil { |
| return m.Sessions |
| } |
| return nil |
| } |
| |
| func (m *ListSessionsResponse) GetNextPageToken() string { |
| if m != nil { |
| return m.NextPageToken |
| } |
| return "" |
| } |
| |
// The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession].
type DeleteSessionRequest struct {
	// Required. The name of the session to delete.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *DeleteSessionRequest) Reset() { *m = DeleteSessionRequest{} }
func (m *DeleteSessionRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteSessionRequest) ProtoMessage() {}
func (*DeleteSessionRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{7}
}

func (m *DeleteSessionRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DeleteSessionRequest.Unmarshal(m, b)
}
func (m *DeleteSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DeleteSessionRequest.Marshal(b, m, deterministic)
}
func (m *DeleteSessionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteSessionRequest.Merge(m, src)
}
func (m *DeleteSessionRequest) XXX_Size() int {
	return xxx_messageInfo_DeleteSessionRequest.Size(m)
}
func (m *DeleteSessionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteSessionRequest.DiscardUnknown(m)
}

// xxx_messageInfo_DeleteSessionRequest caches proto runtime state for this type.
var xxx_messageInfo_DeleteSessionRequest proto.InternalMessageInfo
| |
| func (m *DeleteSessionRequest) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
type ExecuteSqlRequest struct {
	// Required. The session in which the SQL query should be performed.
	Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// The transaction to use.
	//
	// For queries, if none is provided, the default is a temporary read-only
	// transaction with strong concurrency.
	//
	// Standard DML statements require a read-write transaction. To protect
	// against replays, single-use transactions are not supported. The caller
	// must either supply an existing transaction ID or begin a new transaction.
	//
	// Partitioned DML requires an existing Partitioned DML transaction ID.
	Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
	// Required. The SQL string.
	Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
	// Parameter names and values that bind to placeholders in the SQL string.
	//
	// A parameter placeholder consists of the `@` character followed by the
	// parameter name (for example, `@firstName`). Parameter names can contain
	// letters, numbers, and underscores.
	//
	// Parameters can appear anywhere that a literal value is expected. The same
	// parameter name can be used more than once, for example:
	//
	// `"WHERE id > @msg_id AND id < @msg_id + 100"`
	//
	// It is an error to execute a SQL statement with unbound parameters.
	Params *_struct.Struct `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
	// It is not always possible for Cloud Spanner to infer the right SQL type
	// from a JSON value. For example, values of type `BYTES` and values
	// of type `STRING` both appear in
	// [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
	//
	// In these cases, `param_types` can be used to specify the exact
	// SQL type for some or all of the SQL statement parameters. See the
	// definition of [Type][google.spanner.v1.Type] for more information
	// about SQL types.
	ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// If this request is resuming a previously interrupted SQL statement
	// execution, `resume_token` should be copied from the last
	// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
	// interruption. Doing this enables the new SQL statement execution to resume
	// where the last one left off. The rest of the request parameters must
	// exactly match the request that yielded this token.
	ResumeToken []byte `protobuf:"bytes,6,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
	// Used to control the amount of debugging information returned in
	// [ResultSetStats][google.spanner.v1.ResultSetStats]. If
	// [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
	// set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
	// be set to
	// [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
	QueryMode ExecuteSqlRequest_QueryMode `protobuf:"varint,7,opt,name=query_mode,json=queryMode,proto3,enum=google.spanner.v1.ExecuteSqlRequest_QueryMode" json:"query_mode,omitempty"`
	// If present, results will be restricted to the specified partition
	// previously created using PartitionQuery(). There must be an exact
	// match for the values of fields common to this message and the
	// PartitionQueryRequest message used to create this partition_token.
	PartitionToken []byte `protobuf:"bytes,8,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
	// A per-transaction sequence number used to identify this request. This field
	// makes each request idempotent such that if the request is received multiple
	// times, at most one will succeed.
	//
	// The sequence number must be monotonically increasing within the
	// transaction. If a request arrives for the first time with an out-of-order
	// sequence number, the transaction may be aborted. Replays of previously
	// handled requests will yield the same response as the first execution.
	//
	// Required for DML statements. Ignored for queries.
	Seqno int64 `protobuf:"varint,9,opt,name=seqno,proto3" json:"seqno,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *ExecuteSqlRequest) Reset() { *m = ExecuteSqlRequest{} }
func (m *ExecuteSqlRequest) String() string { return proto.CompactTextString(m) }
func (*ExecuteSqlRequest) ProtoMessage() {}
func (*ExecuteSqlRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{8}
}

func (m *ExecuteSqlRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ExecuteSqlRequest.Unmarshal(m, b)
}
func (m *ExecuteSqlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ExecuteSqlRequest.Marshal(b, m, deterministic)
}
func (m *ExecuteSqlRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ExecuteSqlRequest.Merge(m, src)
}
func (m *ExecuteSqlRequest) XXX_Size() int {
	return xxx_messageInfo_ExecuteSqlRequest.Size(m)
}
func (m *ExecuteSqlRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ExecuteSqlRequest.DiscardUnknown(m)
}

// xxx_messageInfo_ExecuteSqlRequest caches proto runtime state for this type.
var xxx_messageInfo_ExecuteSqlRequest proto.InternalMessageInfo
| |
| func (m *ExecuteSqlRequest) GetSession() string { |
| if m != nil { |
| return m.Session |
| } |
| return "" |
| } |
| |
| func (m *ExecuteSqlRequest) GetTransaction() *TransactionSelector { |
| if m != nil { |
| return m.Transaction |
| } |
| return nil |
| } |
| |
| func (m *ExecuteSqlRequest) GetSql() string { |
| if m != nil { |
| return m.Sql |
| } |
| return "" |
| } |
| |
| func (m *ExecuteSqlRequest) GetParams() *_struct.Struct { |
| if m != nil { |
| return m.Params |
| } |
| return nil |
| } |
| |
| func (m *ExecuteSqlRequest) GetParamTypes() map[string]*Type { |
| if m != nil { |
| return m.ParamTypes |
| } |
| return nil |
| } |
| |
| func (m *ExecuteSqlRequest) GetResumeToken() []byte { |
| if m != nil { |
| return m.ResumeToken |
| } |
| return nil |
| } |
| |
| func (m *ExecuteSqlRequest) GetQueryMode() ExecuteSqlRequest_QueryMode { |
| if m != nil { |
| return m.QueryMode |
| } |
| return ExecuteSqlRequest_NORMAL |
| } |
| |
| func (m *ExecuteSqlRequest) GetPartitionToken() []byte { |
| if m != nil { |
| return m.PartitionToken |
| } |
| return nil |
| } |
| |
| func (m *ExecuteSqlRequest) GetSeqno() int64 { |
| if m != nil { |
| return m.Seqno |
| } |
| return 0 |
| } |
| |
// The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
type ExecuteBatchDmlRequest struct {
	// Required. The session in which the DML statements should be performed.
	Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// Required. The transaction to use. Must be a read-write transaction.
	//
	// To protect against replays, single-use transactions are not supported. The
	// caller must either supply an existing transaction ID or begin a new
	// transaction.
	Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
	// Required. The list of statements to execute in this batch. Statements are
	// executed serially, such that the effects of statement `i` are visible to
	// statement `i+1`. Each statement must be a DML statement. Execution stops at
	// the first failed statement; the remaining statements are not executed.
	//
	// Callers must provide at least one statement.
	Statements []*ExecuteBatchDmlRequest_Statement `protobuf:"bytes,3,rep,name=statements,proto3" json:"statements,omitempty"`
	// Required. A per-transaction sequence number used to identify this request.
	// This field makes each request idempotent such that if the request is
	// received multiple times, at most one will succeed.
	//
	// The sequence number must be monotonically increasing within the
	// transaction. If a request arrives for the first time with an out-of-order
	// sequence number, the transaction may be aborted. Replays of previously
	// handled requests will yield the same response as the first execution.
	Seqno int64 `protobuf:"varint,4,opt,name=seqno,proto3" json:"seqno,omitempty"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
func (m *ExecuteBatchDmlRequest) Reset() { *m = ExecuteBatchDmlRequest{} }
func (m *ExecuteBatchDmlRequest) String() string { return proto.CompactTextString(m) }
func (*ExecuteBatchDmlRequest) ProtoMessage() {}
func (*ExecuteBatchDmlRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{9}
}

func (m *ExecuteBatchDmlRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ExecuteBatchDmlRequest.Unmarshal(m, b)
}
func (m *ExecuteBatchDmlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ExecuteBatchDmlRequest.Marshal(b, m, deterministic)
}
func (m *ExecuteBatchDmlRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ExecuteBatchDmlRequest.Merge(m, src)
}
func (m *ExecuteBatchDmlRequest) XXX_Size() int {
	return xxx_messageInfo_ExecuteBatchDmlRequest.Size(m)
}
func (m *ExecuteBatchDmlRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ExecuteBatchDmlRequest.DiscardUnknown(m)
}

// xxx_messageInfo_ExecuteBatchDmlRequest caches proto runtime state for this type.
var xxx_messageInfo_ExecuteBatchDmlRequest proto.InternalMessageInfo
| |
| func (m *ExecuteBatchDmlRequest) GetSession() string { |
| if m != nil { |
| return m.Session |
| } |
| return "" |
| } |
| |
| func (m *ExecuteBatchDmlRequest) GetTransaction() *TransactionSelector { |
| if m != nil { |
| return m.Transaction |
| } |
| return nil |
| } |
| |
| func (m *ExecuteBatchDmlRequest) GetStatements() []*ExecuteBatchDmlRequest_Statement { |
| if m != nil { |
| return m.Statements |
| } |
| return nil |
| } |
| |
| func (m *ExecuteBatchDmlRequest) GetSeqno() int64 { |
| if m != nil { |
| return m.Seqno |
| } |
| return 0 |
| } |
| |
// A single DML statement.
type ExecuteBatchDmlRequest_Statement struct {
	// Required. The DML string.
	Sql string `protobuf:"bytes,1,opt,name=sql,proto3" json:"sql,omitempty"`
	// Parameter names and values that bind to placeholders in the DML string.
	//
	// A parameter placeholder consists of the `@` character followed by the
	// parameter name (for example, `@firstName`). Parameter names can contain
	// letters, numbers, and underscores.
	//
	// Parameters can appear anywhere that a literal value is expected. The
	// same parameter name can be used more than once, for example:
	//
	// `"WHERE id > @msg_id AND id < @msg_id + 100"`
	//
	// It is an error to execute a SQL statement with unbound parameters.
	Params *_struct.Struct `protobuf:"bytes,2,opt,name=params,proto3" json:"params,omitempty"`
	// It is not always possible for Cloud Spanner to infer the right SQL type
	// from a JSON value. For example, values of type `BYTES` and values
	// of type `STRING` both appear in
	// [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
	// JSON strings.
	//
	// In these cases, `param_types` can be used to specify the exact
	// SQL type for some or all of the SQL statement parameters. See the
	// definition of [Type][google.spanner.v1.Type] for more information
	// about SQL types.
	ParamTypes map[string]*Type `protobuf:"bytes,3,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Internal protobuf bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated plumbing: Reset/String/ProtoMessage/Descriptor plus the XXX_*
// methods, which delegate to the proto runtime via the cached message info.
// The descriptor path []int{9, 0} identifies this as nested message 0 of
// message 9 (ExecuteBatchDmlRequest).
func (m *ExecuteBatchDmlRequest_Statement) Reset() { *m = ExecuteBatchDmlRequest_Statement{} }
func (m *ExecuteBatchDmlRequest_Statement) String() string { return proto.CompactTextString(m) }
func (*ExecuteBatchDmlRequest_Statement) ProtoMessage() {}
func (*ExecuteBatchDmlRequest_Statement) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{9, 0}
}

func (m *ExecuteBatchDmlRequest_Statement) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ExecuteBatchDmlRequest_Statement.Unmarshal(m, b)
}
func (m *ExecuteBatchDmlRequest_Statement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ExecuteBatchDmlRequest_Statement.Marshal(b, m, deterministic)
}
func (m *ExecuteBatchDmlRequest_Statement) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ExecuteBatchDmlRequest_Statement.Merge(m, src)
}
func (m *ExecuteBatchDmlRequest_Statement) XXX_Size() int {
	return xxx_messageInfo_ExecuteBatchDmlRequest_Statement.Size(m)
}
func (m *ExecuteBatchDmlRequest_Statement) XXX_DiscardUnknown() {
	xxx_messageInfo_ExecuteBatchDmlRequest_Statement.DiscardUnknown(m)
}

// xxx_messageInfo_ExecuteBatchDmlRequest_Statement caches proto runtime state.
var xxx_messageInfo_ExecuteBatchDmlRequest_Statement proto.InternalMessageInfo
| |
| func (m *ExecuteBatchDmlRequest_Statement) GetSql() string { |
| if m != nil { |
| return m.Sql |
| } |
| return "" |
| } |
| |
| func (m *ExecuteBatchDmlRequest_Statement) GetParams() *_struct.Struct { |
| if m != nil { |
| return m.Params |
| } |
| return nil |
| } |
| |
| func (m *ExecuteBatchDmlRequest_Statement) GetParamTypes() map[string]*Type { |
| if m != nil { |
| return m.ParamTypes |
| } |
| return nil |
| } |
| |
// The response for
// [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list
// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML
// statement that has successfully executed, in the same order as the statements
// in the request. If a statement fails, the status in the response body
// identifies the cause of the failure.
//
// To check for DML statements that failed, use the following approach:
//
// 1. Check the status in the response message. The
// [google.rpc.Code][google.rpc.Code] enum
//    value `OK` indicates that all statements were executed successfully.
// 2. If the status was not `OK`, check the number of result sets in the
//    response. If the response contains `N`
//    [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in
//    the request failed.
//
// Example 1:
//
// * Request: 5 DML statements, all executed successfully.
// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the
// status `OK`.
//
// Example 2:
//
// * Request: 5 DML statements. The third statement has a syntax error.
// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax
// error (`INVALID_ARGUMENT`)
//   status. The number of [ResultSet][google.spanner.v1.ResultSet] messages
//   indicates that the third statement failed, and the fourth and fifth
//   statements were not executed.
type ExecuteBatchDmlResponse struct {
	// One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
	// request that ran successfully, in the same order as the statements in the
	// request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
	// rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
	// [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
	// modified by the statement.
	//
	// Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
	// contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
	ResultSets []*ResultSet `protobuf:"bytes,1,rep,name=result_sets,json=resultSets,proto3" json:"result_sets,omitempty"`
	// If all DML statements are executed successfully, the status is `OK`.
	// Otherwise, the error status of the first failed statement.
	Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
	// Internal protobuf bookkeeping (unknown fields, cached size); not part
	// of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto.Message plumbing generated by protoc-gen-go; all methods
// delegate to the shared message-info table for this type.

func (m *ExecuteBatchDmlResponse) Reset()         { *m = ExecuteBatchDmlResponse{} }
func (m *ExecuteBatchDmlResponse) String() string { return proto.CompactTextString(m) }
func (*ExecuteBatchDmlResponse) ProtoMessage()    {}
func (*ExecuteBatchDmlResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{10}
}

func (m *ExecuteBatchDmlResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ExecuteBatchDmlResponse.Unmarshal(m, b)
}
func (m *ExecuteBatchDmlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ExecuteBatchDmlResponse.Marshal(b, m, deterministic)
}
func (m *ExecuteBatchDmlResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ExecuteBatchDmlResponse.Merge(m, src)
}
func (m *ExecuteBatchDmlResponse) XXX_Size() int {
	return xxx_messageInfo_ExecuteBatchDmlResponse.Size(m)
}
func (m *ExecuteBatchDmlResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ExecuteBatchDmlResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ExecuteBatchDmlResponse proto.InternalMessageInfo

// GetResultSets returns one ResultSet per successfully executed statement, or
// nil if the receiver is nil.
func (m *ExecuteBatchDmlResponse) GetResultSets() []*ResultSet {
	if m != nil {
		return m.ResultSets
	}
	return nil
}

// GetStatus returns the overall execution status, or nil if the receiver is
// nil.
func (m *ExecuteBatchDmlResponse) GetStatus() *status.Status {
	if m != nil {
		return m.Status
	}
	return nil
}
| |
// Options for a PartitionQueryRequest and
// PartitionRead Request.
type PartitionOptions struct {
	// **Note:** This hint is currently ignored by PartitionQuery and
	// PartitionRead requests.
	//
	// The desired data size for each partition generated.  The default for this
	// option is currently 1 GiB.  This is only a hint. The actual size of each
	// partition may be smaller or larger than this size request.
	PartitionSizeBytes int64 `protobuf:"varint,1,opt,name=partition_size_bytes,json=partitionSizeBytes,proto3" json:"partition_size_bytes,omitempty"`
	// **Note:** This hint is currently ignored by PartitionQuery and
	// PartitionRead requests.
	//
	// The desired maximum number of partitions to return.  For example, this may
	// be set to the number of workers available.  The default for this option
	// is currently 10,000. The maximum value is currently 200,000.  This is only
	// a hint.  The actual number of partitions returned may be smaller or larger
	// than this maximum count request.
	MaxPartitions int64 `protobuf:"varint,2,opt,name=max_partitions,json=maxPartitions,proto3" json:"max_partitions,omitempty"`
	// Internal protobuf bookkeeping (unknown fields, cached size); not part
	// of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto.Message plumbing generated by protoc-gen-go; all methods
// delegate to the shared message-info table for this type.

func (m *PartitionOptions) Reset()         { *m = PartitionOptions{} }
func (m *PartitionOptions) String() string { return proto.CompactTextString(m) }
func (*PartitionOptions) ProtoMessage()    {}
func (*PartitionOptions) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{11}
}

func (m *PartitionOptions) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PartitionOptions.Unmarshal(m, b)
}
func (m *PartitionOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PartitionOptions.Marshal(b, m, deterministic)
}
func (m *PartitionOptions) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PartitionOptions.Merge(m, src)
}
func (m *PartitionOptions) XXX_Size() int {
	return xxx_messageInfo_PartitionOptions.Size(m)
}
func (m *PartitionOptions) XXX_DiscardUnknown() {
	xxx_messageInfo_PartitionOptions.DiscardUnknown(m)
}

var xxx_messageInfo_PartitionOptions proto.InternalMessageInfo

// GetPartitionSizeBytes returns the desired partition size hint, or 0 if the
// receiver is nil.
func (m *PartitionOptions) GetPartitionSizeBytes() int64 {
	if m != nil {
		return m.PartitionSizeBytes
	}
	return 0
}

// GetMaxPartitions returns the desired maximum partition count hint, or 0 if
// the receiver is nil.
func (m *PartitionOptions) GetMaxPartitions() int64 {
	if m != nil {
		return m.MaxPartitions
	}
	return 0
}
| |
// The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
type PartitionQueryRequest struct {
	// Required. The session used to create the partitions.
	Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// Read only snapshot transactions are supported, read/write and single use
	// transactions are not.
	Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
	// Required. The query request to generate partitions for. The request will
	// fail if the query is not root partitionable. The query plan of a root
	// partitionable query has a single distributed union operator. A distributed
	// union operator conceptually divides one or more tables into multiple
	// splits, remotely evaluates a subquery independently on each split, and
	// then unions all results.
	//
	// This must not contain DML commands, such as INSERT, UPDATE, or
	// DELETE. Use
	// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a
	// PartitionedDml transaction for large, partition-friendly DML operations.
	Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
	// Parameter names and values that bind to placeholders in the SQL string.
	//
	// A parameter placeholder consists of the `@` character followed by the
	// parameter name (for example, `@firstName`). Parameter names can contain
	// letters, numbers, and underscores.
	//
	// Parameters can appear anywhere that a literal value is expected.  The same
	// parameter name can be used more than once, for example:
	//
	//   `"WHERE id > @msg_id AND id < @msg_id + 100"`
	//
	// It is an error to execute a SQL statement with unbound parameters.
	Params *_struct.Struct `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
	// It is not always possible for Cloud Spanner to infer the right SQL type
	// from a JSON value.  For example, values of type `BYTES` and values
	// of type `STRING` both appear in
	// [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
	//
	// In these cases, `param_types` can be used to specify the exact
	// SQL type for some or all of the SQL query parameters. See the
	// definition of [Type][google.spanner.v1.Type] for more information
	// about SQL types.
	ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Additional options that affect how many partitions are created.
	PartitionOptions *PartitionOptions `protobuf:"bytes,6,opt,name=partition_options,json=partitionOptions,proto3" json:"partition_options,omitempty"`
	// Internal protobuf bookkeeping (unknown fields, cached size); not part
	// of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto.Message plumbing generated by protoc-gen-go; all methods
// delegate to the shared message-info table for this type.

func (m *PartitionQueryRequest) Reset()         { *m = PartitionQueryRequest{} }
func (m *PartitionQueryRequest) String() string { return proto.CompactTextString(m) }
func (*PartitionQueryRequest) ProtoMessage()    {}
func (*PartitionQueryRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{12}
}

func (m *PartitionQueryRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PartitionQueryRequest.Unmarshal(m, b)
}
func (m *PartitionQueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PartitionQueryRequest.Marshal(b, m, deterministic)
}
func (m *PartitionQueryRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PartitionQueryRequest.Merge(m, src)
}
func (m *PartitionQueryRequest) XXX_Size() int {
	return xxx_messageInfo_PartitionQueryRequest.Size(m)
}
func (m *PartitionQueryRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_PartitionQueryRequest.DiscardUnknown(m)
}

var xxx_messageInfo_PartitionQueryRequest proto.InternalMessageInfo

// Nil-safe accessors, per protoc-gen-go convention: each getter returns the
// field's zero value when the receiver is nil.

// GetSession returns the session used to create the partitions, or "".
func (m *PartitionQueryRequest) GetSession() string {
	if m != nil {
		return m.Session
	}
	return ""
}

// GetTransaction returns the transaction selector, or nil.
func (m *PartitionQueryRequest) GetTransaction() *TransactionSelector {
	if m != nil {
		return m.Transaction
	}
	return nil
}

// GetSql returns the query to partition, or "".
func (m *PartitionQueryRequest) GetSql() string {
	if m != nil {
		return m.Sql
	}
	return ""
}

// GetParams returns the bound parameter values, or nil.
func (m *PartitionQueryRequest) GetParams() *_struct.Struct {
	if m != nil {
		return m.Params
	}
	return nil
}

// GetParamTypes returns the explicit SQL types for parameters, or nil.
func (m *PartitionQueryRequest) GetParamTypes() map[string]*Type {
	if m != nil {
		return m.ParamTypes
	}
	return nil
}

// GetPartitionOptions returns the partitioning options, or nil.
func (m *PartitionQueryRequest) GetPartitionOptions() *PartitionOptions {
	if m != nil {
		return m.PartitionOptions
	}
	return nil
}
| |
// The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
type PartitionReadRequest struct {
	// Required. The session used to create the partitions.
	Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// Read only snapshot transactions are supported, read/write and single use
	// transactions are not.
	Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
	// Required. The name of the table in the database to be read.
	Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
	// If non-empty, the name of an index on
	// [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
	// instead of the table primary key when interpreting
	// [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
	// result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
	// for further information.
	Index string `protobuf:"bytes,4,opt,name=index,proto3" json:"index,omitempty"`
	// The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
	// returned for each row matching this request.
	Columns []string `protobuf:"bytes,5,rep,name=columns,proto3" json:"columns,omitempty"`
	// Required. `key_set` identifies the rows to be yielded. `key_set` names the
	// primary keys of the rows in
	// [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
	// [index][google.spanner.v1.PartitionReadRequest.index] is present. If
	// [index][google.spanner.v1.PartitionReadRequest.index] is present, then
	// [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
	// index keys in [index][google.spanner.v1.PartitionReadRequest.index].
	//
	// It is not an error for the `key_set` to name rows that do not
	// exist in the database. Read yields nothing for nonexistent rows.
	KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
	// Additional options that affect how many partitions are created.
	PartitionOptions *PartitionOptions `protobuf:"bytes,9,opt,name=partition_options,json=partitionOptions,proto3" json:"partition_options,omitempty"`
	// Internal protobuf bookkeeping (unknown fields, cached size); not part
	// of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto.Message plumbing generated by protoc-gen-go; all methods
// delegate to the shared message-info table for this type.

func (m *PartitionReadRequest) Reset()         { *m = PartitionReadRequest{} }
func (m *PartitionReadRequest) String() string { return proto.CompactTextString(m) }
func (*PartitionReadRequest) ProtoMessage()    {}
func (*PartitionReadRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{13}
}

func (m *PartitionReadRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PartitionReadRequest.Unmarshal(m, b)
}
func (m *PartitionReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PartitionReadRequest.Marshal(b, m, deterministic)
}
func (m *PartitionReadRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PartitionReadRequest.Merge(m, src)
}
func (m *PartitionReadRequest) XXX_Size() int {
	return xxx_messageInfo_PartitionReadRequest.Size(m)
}
func (m *PartitionReadRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_PartitionReadRequest.DiscardUnknown(m)
}

var xxx_messageInfo_PartitionReadRequest proto.InternalMessageInfo

// Nil-safe accessors, per protoc-gen-go convention: each getter returns the
// field's zero value when the receiver is nil.

// GetSession returns the session used to create the partitions, or "".
func (m *PartitionReadRequest) GetSession() string {
	if m != nil {
		return m.Session
	}
	return ""
}

// GetTransaction returns the transaction selector, or nil.
func (m *PartitionReadRequest) GetTransaction() *TransactionSelector {
	if m != nil {
		return m.Transaction
	}
	return nil
}

// GetTable returns the name of the table to read, or "".
func (m *PartitionReadRequest) GetTable() string {
	if m != nil {
		return m.Table
	}
	return ""
}

// GetIndex returns the index name to read with, or "".
func (m *PartitionReadRequest) GetIndex() string {
	if m != nil {
		return m.Index
	}
	return ""
}

// GetColumns returns the columns to return for each row, or nil.
func (m *PartitionReadRequest) GetColumns() []string {
	if m != nil {
		return m.Columns
	}
	return nil
}

// GetKeySet returns the key set identifying the rows to read, or nil.
func (m *PartitionReadRequest) GetKeySet() *KeySet {
	if m != nil {
		return m.KeySet
	}
	return nil
}

// GetPartitionOptions returns the partitioning options, or nil.
func (m *PartitionReadRequest) GetPartitionOptions() *PartitionOptions {
	if m != nil {
		return m.PartitionOptions
	}
	return nil
}
| |
// Information returned for each partition returned in a
// PartitionResponse.
type Partition struct {
	// This token can be passed to Read, StreamingRead, ExecuteSql, or
	// ExecuteStreamingSql requests to restrict the results to those identified by
	// this partition token.
	PartitionToken []byte `protobuf:"bytes,1,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
	// Internal protobuf bookkeeping (unknown fields, cached size); not part
	// of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto.Message plumbing generated by protoc-gen-go; all methods
// delegate to the shared message-info table for this type.

func (m *Partition) Reset()         { *m = Partition{} }
func (m *Partition) String() string { return proto.CompactTextString(m) }
func (*Partition) ProtoMessage()    {}
func (*Partition) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{14}
}

func (m *Partition) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Partition.Unmarshal(m, b)
}
func (m *Partition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Partition.Marshal(b, m, deterministic)
}
func (m *Partition) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Partition.Merge(m, src)
}
func (m *Partition) XXX_Size() int {
	return xxx_messageInfo_Partition.Size(m)
}
func (m *Partition) XXX_DiscardUnknown() {
	xxx_messageInfo_Partition.DiscardUnknown(m)
}

var xxx_messageInfo_Partition proto.InternalMessageInfo

// GetPartitionToken returns the opaque partition token, or nil if the
// receiver is nil.
func (m *Partition) GetPartitionToken() []byte {
	if m != nil {
		return m.PartitionToken
	}
	return nil
}
| |
// The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
// or [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
type PartitionResponse struct {
	// Partitions created by this request.
	Partitions []*Partition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty"`
	// Transaction created by this request.
	Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
	// Internal protobuf bookkeeping (unknown fields, cached size); not part
	// of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto.Message plumbing generated by protoc-gen-go; all methods
// delegate to the shared message-info table for this type.

func (m *PartitionResponse) Reset()         { *m = PartitionResponse{} }
func (m *PartitionResponse) String() string { return proto.CompactTextString(m) }
func (*PartitionResponse) ProtoMessage()    {}
func (*PartitionResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{15}
}

func (m *PartitionResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PartitionResponse.Unmarshal(m, b)
}
func (m *PartitionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PartitionResponse.Marshal(b, m, deterministic)
}
func (m *PartitionResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PartitionResponse.Merge(m, src)
}
func (m *PartitionResponse) XXX_Size() int {
	return xxx_messageInfo_PartitionResponse.Size(m)
}
func (m *PartitionResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_PartitionResponse.DiscardUnknown(m)
}

var xxx_messageInfo_PartitionResponse proto.InternalMessageInfo

// GetPartitions returns the partitions created by the request, or nil if the
// receiver is nil.
func (m *PartitionResponse) GetPartitions() []*Partition {
	if m != nil {
		return m.Partitions
	}
	return nil
}

// GetTransaction returns the transaction created by the request, or nil if
// the receiver is nil.
func (m *PartitionResponse) GetTransaction() *Transaction {
	if m != nil {
		return m.Transaction
	}
	return nil
}
| |
// The request for [Read][google.spanner.v1.Spanner.Read] and
// [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
type ReadRequest struct {
	// Required. The session in which the read should be performed.
	Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// The transaction to use. If none is provided, the default is a
	// temporary read-only transaction with strong concurrency.
	Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
	// Required. The name of the table in the database to be read.
	Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
	// If non-empty, the name of an index on
	// [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
	// the table primary key when interpreting
	// [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
	// See [key_set][google.spanner.v1.ReadRequest.key_set] for further
	// information.
	Index string `protobuf:"bytes,4,opt,name=index,proto3" json:"index,omitempty"`
	// Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
	// returned for each row matching this request.
	Columns []string `protobuf:"bytes,5,rep,name=columns,proto3" json:"columns,omitempty"`
	// Required. `key_set` identifies the rows to be yielded. `key_set` names the
	// primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
	// be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
	// If [index][google.spanner.v1.ReadRequest.index] is present, then
	// [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
	// in [index][google.spanner.v1.ReadRequest.index].
	//
	// If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
	// field is empty, rows are yielded in table primary key order (if
	// [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
	// (if [index][google.spanner.v1.ReadRequest.index] is non-empty).  If the
	// [partition_token][google.spanner.v1.ReadRequest.partition_token] field is
	// not empty, rows will be yielded in an unspecified order.
	//
	// It is not an error for the `key_set` to name rows that do not
	// exist in the database. Read yields nothing for nonexistent rows.
	KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
	// If greater than zero, only the first `limit` rows are yielded. If `limit`
	// is zero, the default is no limit. A limit cannot be specified if
	// `partition_token` is set.
	Limit int64 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"`
	// If this request is resuming a previously interrupted read,
	// `resume_token` should be copied from the last
	// [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
	// interruption. Doing this enables the new read to resume where the last read
	// left off. The rest of the request parameters must exactly match the request
	// that yielded this token.
	ResumeToken []byte `protobuf:"bytes,9,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
	// If present, results will be restricted to the specified partition
	// previously created using PartitionRead().    There must be an exact
	// match for the values of fields common to this message and the
	// PartitionReadRequest message used to create this partition_token.
	PartitionToken []byte `protobuf:"bytes,10,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
	// Internal protobuf bookkeeping (unknown fields, cached size); not part
	// of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto.Message plumbing generated by protoc-gen-go; all methods
// delegate to the shared message-info table for this type.

func (m *ReadRequest) Reset()         { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage()    {}
func (*ReadRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{16}
}

func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadRequest.Unmarshal(m, b)
}
func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic)
}
func (m *ReadRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadRequest.Merge(m, src)
}
func (m *ReadRequest) XXX_Size() int {
	return xxx_messageInfo_ReadRequest.Size(m)
}
func (m *ReadRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ReadRequest proto.InternalMessageInfo

// Nil-safe accessors, per protoc-gen-go convention: each getter returns the
// field's zero value when the receiver is nil.

// GetSession returns the session in which to perform the read, or "".
func (m *ReadRequest) GetSession() string {
	if m != nil {
		return m.Session
	}
	return ""
}

// GetTransaction returns the transaction selector, or nil.
func (m *ReadRequest) GetTransaction() *TransactionSelector {
	if m != nil {
		return m.Transaction
	}
	return nil
}

// GetTable returns the name of the table to read, or "".
func (m *ReadRequest) GetTable() string {
	if m != nil {
		return m.Table
	}
	return ""
}

// GetIndex returns the index name to read with, or "".
func (m *ReadRequest) GetIndex() string {
	if m != nil {
		return m.Index
	}
	return ""
}

// GetColumns returns the columns to return for each row, or nil.
func (m *ReadRequest) GetColumns() []string {
	if m != nil {
		return m.Columns
	}
	return nil
}

// GetKeySet returns the key set identifying the rows to read, or nil.
func (m *ReadRequest) GetKeySet() *KeySet {
	if m != nil {
		return m.KeySet
	}
	return nil
}

// GetLimit returns the maximum number of rows to yield, or 0 (no limit).
func (m *ReadRequest) GetLimit() int64 {
	if m != nil {
		return m.Limit
	}
	return 0
}

// GetResumeToken returns the resume token from a prior interrupted read, or
// nil.
func (m *ReadRequest) GetResumeToken() []byte {
	if m != nil {
		return m.ResumeToken
	}
	return nil
}

// GetPartitionToken returns the partition token restricting the read, or nil.
func (m *ReadRequest) GetPartitionToken() []byte {
	if m != nil {
		return m.PartitionToken
	}
	return nil
}
| |
// The request for
// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
type BeginTransactionRequest struct {
	// Required. The session in which the transaction runs.
	Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// Required. Options for the new transaction.
	Options *TransactionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
	// Internal protobuf bookkeeping (unknown fields, cached size); not part
	// of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto.Message plumbing generated by protoc-gen-go; all methods
// delegate to the shared message-info table for this type.

func (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }
func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
func (*BeginTransactionRequest) ProtoMessage()    {}
func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{17}
}

func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
}
func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
}
func (m *BeginTransactionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BeginTransactionRequest.Merge(m, src)
}
func (m *BeginTransactionRequest) XXX_Size() int {
	return xxx_messageInfo_BeginTransactionRequest.Size(m)
}
func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
}

var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo

// GetSession returns the session in which the transaction runs, or "" if the
// receiver is nil.
func (m *BeginTransactionRequest) GetSession() string {
	if m != nil {
		return m.Session
	}
	return ""
}

// GetOptions returns the options for the new transaction, or nil if the
// receiver is nil.
func (m *BeginTransactionRequest) GetOptions() *TransactionOptions {
	if m != nil {
		return m.Options
	}
	return nil
}
| |
| // The request for [Commit][google.spanner.v1.Spanner.Commit]. |
| type CommitRequest struct { |
| // Required. The session in which the transaction to be committed is running. |
| Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"` |
| // Required. The transaction in which to commit. |
| // |
| // Types that are valid to be assigned to Transaction: |
| // *CommitRequest_TransactionId |
| // *CommitRequest_SingleUseTransaction |
| Transaction isCommitRequest_Transaction `protobuf_oneof:"transaction"` |
| // The mutations to be executed when this transaction commits. All |
| // mutations are applied atomically, in the order they appear in |
| // this list. |
| Mutations []*Mutation `protobuf:"bytes,4,rep,name=mutations,proto3" json:"mutations,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *CommitRequest) Reset() { *m = CommitRequest{} } |
| func (m *CommitRequest) String() string { return proto.CompactTextString(m) } |
| func (*CommitRequest) ProtoMessage() {} |
| func (*CommitRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_a420fdbb92791b07, []int{18} |
| } |
| |
| func (m *CommitRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_CommitRequest.Unmarshal(m, b) |
| } |
| func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) |
| } |
| func (m *CommitRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_CommitRequest.Merge(m, src) |
| } |
| func (m *CommitRequest) XXX_Size() int { |
| return xxx_messageInfo_CommitRequest.Size(m) |
| } |
| func (m *CommitRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_CommitRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_CommitRequest proto.InternalMessageInfo |
| |
| func (m *CommitRequest) GetSession() string { |
| if m != nil { |
| return m.Session |
| } |
| return "" |
| } |
| |
// isCommitRequest_Transaction is implemented by every wrapper type that can
// populate the CommitRequest.Transaction oneof field.
type isCommitRequest_Transaction interface {
	isCommitRequest_Transaction()
}

// CommitRequest_TransactionId wraps the transaction_id oneof case: the ID of
// a previously begun transaction to commit.
type CommitRequest_TransactionId struct {
	TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3,oneof"`
}

// CommitRequest_SingleUseTransaction wraps the single_use_transaction oneof
// case: transaction options used to execute the commit in a single RPC.
type CommitRequest_SingleUseTransaction struct {
	SingleUseTransaction *TransactionOptions `protobuf:"bytes,3,opt,name=single_use_transaction,json=singleUseTransaction,proto3,oneof"`
}

func (*CommitRequest_TransactionId) isCommitRequest_Transaction() {}

func (*CommitRequest_SingleUseTransaction) isCommitRequest_Transaction() {}
| |
| func (m *CommitRequest) GetTransaction() isCommitRequest_Transaction { |
| if m != nil { |
| return m.Transaction |
| } |
| return nil |
| } |
| |
| func (m *CommitRequest) GetTransactionId() []byte { |
| if x, ok := m.GetTransaction().(*CommitRequest_TransactionId); ok { |
| return x.TransactionId |
| } |
| return nil |
| } |
| |
| func (m *CommitRequest) GetSingleUseTransaction() *TransactionOptions { |
| if x, ok := m.GetTransaction().(*CommitRequest_SingleUseTransaction); ok { |
| return x.SingleUseTransaction |
| } |
| return nil |
| } |
| |
| func (m *CommitRequest) GetMutations() []*Mutation { |
| if m != nil { |
| return m.Mutations |
| } |
| return nil |
| } |
| |
// XXX_OneofWrappers is for the internal use of the proto package: it lists
// the wrapper types that may populate the transaction oneof.
func (*CommitRequest) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*CommitRequest_TransactionId)(nil),
		(*CommitRequest_SingleUseTransaction)(nil),
	}
}
| |
// The response for [Commit][google.spanner.v1.Spanner.Commit].
type CommitResponse struct {
	// The Cloud Spanner timestamp at which the transaction committed.
	CommitTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
	// Internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset clears m to the zero CommitResponse.
func (m *CommitResponse) Reset() { *m = CommitResponse{} }

// String returns the compact text representation of m.
func (m *CommitResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks CommitResponse as a protocol buffer message.
func (*CommitResponse) ProtoMessage() {}

// Descriptor returns the gzipped file descriptor for spanner.proto and the
// index path of this message within it (message index 19).
func (*CommitResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{19}
}

// XXX_Unmarshal deserializes b into m. Internal use by the proto package.
func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
}

// XXX_Marshal appends the wire encoding of m to b, forwarding the
// deterministic flag to the proto runtime. Internal use only.
func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m. Internal use by the proto package.
func (m *CommitResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CommitResponse.Merge(m, src)
}

// XXX_Size reports the serialized size of m as computed by the proto runtime.
func (m *CommitResponse) XXX_Size() int {
	return xxx_messageInfo_CommitResponse.Size(m)
}

// XXX_DiscardUnknown drops any unknown fields retained on m.
func (m *CommitResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CommitResponse.DiscardUnknown(m)
}

// xxx_messageInfo_CommitResponse caches (un)marshaling metadata for CommitResponse.
var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
| |
| func (m *CommitResponse) GetCommitTimestamp() *timestamp.Timestamp { |
| if m != nil { |
| return m.CommitTimestamp |
| } |
| return nil |
| } |
| |
// The request for [Rollback][google.spanner.v1.Spanner.Rollback].
type RollbackRequest struct {
	// Required. The session in which the transaction to roll back is running.
	Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// Required. The transaction to roll back.
	TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"`
	// Internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset clears m to the zero RollbackRequest.
func (m *RollbackRequest) Reset() { *m = RollbackRequest{} }

// String returns the compact text representation of m.
func (m *RollbackRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks RollbackRequest as a protocol buffer message.
func (*RollbackRequest) ProtoMessage() {}

// Descriptor returns the gzipped file descriptor for spanner.proto and the
// index path of this message within it (message index 20).
func (*RollbackRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a420fdbb92791b07, []int{20}
}

// XXX_Unmarshal deserializes b into m. Internal use by the proto package.
func (m *RollbackRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RollbackRequest.Unmarshal(m, b)
}

// XXX_Marshal appends the wire encoding of m to b, forwarding the
// deterministic flag to the proto runtime. Internal use only.
func (m *RollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RollbackRequest.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m. Internal use by the proto package.
func (m *RollbackRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RollbackRequest.Merge(m, src)
}

// XXX_Size reports the serialized size of m as computed by the proto runtime.
func (m *RollbackRequest) XXX_Size() int {
	return xxx_messageInfo_RollbackRequest.Size(m)
}

// XXX_DiscardUnknown drops any unknown fields retained on m.
func (m *RollbackRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RollbackRequest.DiscardUnknown(m)
}

// xxx_messageInfo_RollbackRequest caches (un)marshaling metadata for RollbackRequest.
var xxx_messageInfo_RollbackRequest proto.InternalMessageInfo
| |
| func (m *RollbackRequest) GetSession() string { |
| if m != nil { |
| return m.Session |
| } |
| return "" |
| } |
| |
| func (m *RollbackRequest) GetTransactionId() []byte { |
| if m != nil { |
| return m.TransactionId |
| } |
| return nil |
| } |
| |
// init registers the enum, message, and map types generated from
// google/spanner/v1/spanner.proto with the proto type registry under their
// fully-qualified proto names.
func init() {
	proto.RegisterEnum("google.spanner.v1.ExecuteSqlRequest_QueryMode", ExecuteSqlRequest_QueryMode_name, ExecuteSqlRequest_QueryMode_value)
	proto.RegisterType((*CreateSessionRequest)(nil), "google.spanner.v1.CreateSessionRequest")
	proto.RegisterType((*BatchCreateSessionsRequest)(nil), "google.spanner.v1.BatchCreateSessionsRequest")
	proto.RegisterType((*BatchCreateSessionsResponse)(nil), "google.spanner.v1.BatchCreateSessionsResponse")
	proto.RegisterType((*Session)(nil), "google.spanner.v1.Session")
	proto.RegisterMapType((map[string]string)(nil), "google.spanner.v1.Session.LabelsEntry")
	proto.RegisterType((*GetSessionRequest)(nil), "google.spanner.v1.GetSessionRequest")
	proto.RegisterType((*ListSessionsRequest)(nil), "google.spanner.v1.ListSessionsRequest")
	proto.RegisterType((*ListSessionsResponse)(nil), "google.spanner.v1.ListSessionsResponse")
	proto.RegisterType((*DeleteSessionRequest)(nil), "google.spanner.v1.DeleteSessionRequest")
	proto.RegisterType((*ExecuteSqlRequest)(nil), "google.spanner.v1.ExecuteSqlRequest")
	proto.RegisterMapType((map[string]*Type)(nil), "google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry")
	proto.RegisterType((*ExecuteBatchDmlRequest)(nil), "google.spanner.v1.ExecuteBatchDmlRequest")
	proto.RegisterType((*ExecuteBatchDmlRequest_Statement)(nil), "google.spanner.v1.ExecuteBatchDmlRequest.Statement")
	proto.RegisterMapType((map[string]*Type)(nil), "google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry")
	proto.RegisterType((*ExecuteBatchDmlResponse)(nil), "google.spanner.v1.ExecuteBatchDmlResponse")
	proto.RegisterType((*PartitionOptions)(nil), "google.spanner.v1.PartitionOptions")
	proto.RegisterType((*PartitionQueryRequest)(nil), "google.spanner.v1.PartitionQueryRequest")
	proto.RegisterMapType((map[string]*Type)(nil), "google.spanner.v1.PartitionQueryRequest.ParamTypesEntry")
	proto.RegisterType((*PartitionReadRequest)(nil), "google.spanner.v1.PartitionReadRequest")
	proto.RegisterType((*Partition)(nil), "google.spanner.v1.Partition")
	proto.RegisterType((*PartitionResponse)(nil), "google.spanner.v1.PartitionResponse")
	proto.RegisterType((*ReadRequest)(nil), "google.spanner.v1.ReadRequest")
	proto.RegisterType((*BeginTransactionRequest)(nil), "google.spanner.v1.BeginTransactionRequest")
	proto.RegisterType((*CommitRequest)(nil), "google.spanner.v1.CommitRequest")
	proto.RegisterType((*CommitResponse)(nil), "google.spanner.v1.CommitResponse")
	proto.RegisterType((*RollbackRequest)(nil), "google.spanner.v1.RollbackRequest")
}

// init registers the raw (gzipped) file descriptor under its proto file path.
func init() { proto.RegisterFile("google/spanner/v1/spanner.proto", fileDescriptor_a420fdbb92791b07) }
| |
// fileDescriptor_a420fdbb92791b07 holds the gzip-compressed
// FileDescriptorProto for google/spanner/v1/spanner.proto. It is registered
// with the proto runtime in init and returned (with an index path) by every
// generated Descriptor method. Machine-generated data: never edit by hand.
var fileDescriptor_a420fdbb92791b07 = []byte{
	// 2203 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0x4d, 0x6c, 0x1b, 0xc7,
	0x15, 0xce, 0x92, 0xfa, 0xe3, 0xd3, 0xaf, 0xc7, 0xb2, 0x2c, 0xd3, 0x8e, 0x4d, 0x6f, 0x12, 0x5b,
	0x25, 0x6c, 0x6e, 0xec, 0x18, 0xad, 0xa3, 0x38, 0xad, 0x29, 0x59, 0xb1, 0x5d, 0x4b, 0x96, 0xbc,
	0x94, 0xdd, 0x36, 0x75, 0x41, 0x2c, 0xc9, 0x31, 0xbd, 0xd1, 0xfe, 0x69, 0x67, 0x28, 0x8b, 0x31,
	0x8c, 0x02, 0x45, 0x8e, 0xbd, 0x04, 0x39, 0x14, 0x45, 0x4f, 0x6d, 0x80, 0x1e, 0x8a, 0x36, 0xe8,
	0xa1, 0x40, 0x6f, 0x45, 0xae, 0xcd, 0x31, 0xbd, 0xe9, 0xe4, 0x43, 0x4f, 0xe9, 0xa5, 0xb7, 0x14,
	0x3d, 0x15, 0xf3, 0xb7, 0x5c, 0x92, 0x2b, 0x8a, 0x0e, 0x65, 0xa0, 0xc8, 0x6d, 0x77, 0xe6, 0xcd,
	0x9b, 0xef, 0xbd, 0xef, 0xbd, 0x99, 0xb7, 0x6f, 0xe1, 0x4c, 0xdd, 0xf7, 0xeb, 0x0e, 0x36, 0x48,
	0x60, 0x79, 0x1e, 0x0e, 0x8d, 0x9d, 0x4b, 0xea, 0xb1, 0x10, 0x84, 0x3e, 0xf5, 0xd1, 0x11, 0x21,
	0x50, 0x50, 0xa3, 0x3b, 0x97, 0xb2, 0xa7, 0xe4, 0x1a, 0x2b, 0xb0, 0x0d, 0xcb, 0xf3, 0x7c, 0x6a,
	0x51, 0xdb, 0xf7, 0x88, 0x58, 0x90, 0x3d, 0x1e, 0x9b, 0xad, 0x3a, 0x36, 0xf6, 0xa8, 0x9c, 0x38,
	0x13, 0x9b, 0x78, 0x64, 0x63, 0xa7, 0x56, 0xae, 0xe0, 0xc7, 0xd6, 0x8e, 0xed, 0xcb, 0xad, 0xb2,
	0x27, 0x62, 0x02, 0x21, 0x26, 0x7e, 0x23, 0xac, 0x62, 0x39, 0x75, 0x52, 0x4e, 0xf1, 0xb7, 0x4a,
	0xe3, 0x91, 0x81, 0xdd, 0x80, 0x36, 0xe5, 0xe4, 0xa9, 0xce, 0x49, 0x42, 0xc3, 0x46, 0xb5, 0x73,
	0xdb, 0x68, 0x96, 0xda, 0x2e, 0x26, 0xd4, 0x72, 0x83, 0x0e, 0xc0, 0x61, 0x50, 0x35, 0x08, 0xb5,
	0x68, 0x83, 0x74, 0xe8, 0x8d, 0xf9, 0x66, 0x0b, 0x37, 0xd5, 0x6c, 0xae, 0x7b, 0xd6, 0x6d, 0x08,
	0x57, 0x48, 0x09, 0xbd, 0x5b, 0x22, 0xc4, 0xa4, 0xe1, 0xd0, 0x32, 0xc1, 0x0a, 0xdd, 0x6b, 0xdd,
	0x32, 0x34, 0xb4, 0x3c, 0x62, 0x55, 0x63, 0x8a, 0x12, 0x80, 0xd0, 0x66, 0x20, 0x7d, 0xa3, 0x7f,
	0xac, 0xc1, 0xec, 0x72, 0x88, 0x2d, 0x8a, 0x4b, 0x98, 0x10, 0xdb, 0xf7, 0x4c, 0xbc, 0xdd, 0xc0,
	0x84, 0xa2, 0x65, 0x18, 0xab, 0x59, 0xd4, 0xaa, 0x58, 0x04, 0xcf, 0x6b, 0x39, 0x6d, 0x21, 0xb3,
	0x74, 0xfe, 0x79, 0x31, 0xf5, 0xdf, 0xe2, 0x59, 0x38, 0xa3, 0xd8, 0x14, 0x8a, 0xad, 0xc0, 0x26,
	0x85, 0xaa, 0xef, 0x1a, 0x37, 0xa4, 0xb8, 0x19, 0x2d, 0x44, 0x57, 0x60, 0x94, 0x08, 0xb5, 0xf3,
	0xa9, 0x9c, 0xb6, 0x30, 0x7e, 0x39, 0x5b, 0xe8, 0x8a, 0x88, 0x82, 0xda, 0x58, 0x89, 0xea, 0x5f,
	0x6a, 0x90, 0x5d, 0xb2, 0x68, 0xf5, 0x71, 0x1b, 0x30, 0x72, 0xa8, 0xc8, 0x56, 0x60, 0x46, 0x6e,
	0x57, 0xa6, 0xd8, 0x0d, 0x1c, 0x8b, 0xe2, 0x3e, 0x20, 0x4e, 0xcb, 0x35, 0x9b, 0x72, 0x09, 0x5a,
	0x80, 0x49, 0xa5, 0xa6, 0xea, 0x37, 0x3c, 0x3a, 0x9f, 0xce, 0x69, 0x0b, 0xc3, 0x4b, 0xe9, 0xe7,
	0xc5, 0x94, 0x39, 0x21, 0x67, 0x96, 0xd9, 0x84, 0x5e, 0x82, 0x93, 0x89, 0x36, 0x91, 0xc0, 0xf7,
	0xda, 0x3d, 0xa5, 0xe5, 0xd2, 0xfd, 0x7a, 0xea, 0x77, 0x69, 0x18, 0x95, 0x83, 0x08, 0xc1, 0x90,
	0x67, 0xb9, 0xd2, 0x25, 0x26, 0x7f, 0x46, 0xdf, 0x87, 0x11, 0xc7, 0xaa, 0x60, 0x87, 0xcc, 0xa7,
	0xb8, 0xd2, 0x73, 0xfb, 0x2b, 0x2d, 0xac, 0x72, 0xc1, 0x15, 0x8f, 0x86, 0x4d, 0x53, 0xae, 0x42,
	0xef, 0xc0, 0x78, 0x95, 0xe3, 0x2d, 0xb3, 0xb8, 0xe7, 0xc6, 0xc5, 0x90, 0xa9, 0xa4, 0x28, 0x6c,
	0xaa, 0xa4, 0x30, 0x41, 0x88, 0xb3, 0x01, 0x74, 0x1f, 0x4e, 0x58, 0x41, 0x10, 0xfa, 0xbb, 0xb6,
	0xcb, 0x34, 0x38, 0x16, 0xa1, 0xe5, 0x06, 0x91, 0xaa, 0x86, 0x0e, 0x54, 0x35, 0x17, 0x5b, 0xbc,
	0x6a, 0x11, 0x7a, 0x9f, 0x70, 0xb5, 0xd9, 0xb7, 0x61, 0x3c, 0x06, 0x15, 0xcd, 0x40, 0x7a, 0x0b,
	0x37, 0xa5, 0xd5, 0xec, 0x11, 0xcd, 0xc2, 0xf0, 0x8e, 0xe5, 0x34, 0x04, 0x9f, 0x19, 0x53, 0xbc,
	0x2c, 0xa6, 0xae, 0x6a, 0x8b, 0xf4, 0xab, 0xe2, 0x36, 0x9c, 0xde, 0x27, 0x48, 0x94, 0x1f, 0xd7,
	0x83, 0xd0, 0xff, 0x00, 0x57, 0x29, 0x31, 0x9e, 0xca, 0xa7, 0x67, 0x86, 0xed, 0x11, 0x6a, 0x79,
	0x55, 0x4c, 0x8c, 0xa7, 0xea, 0xf1, 0x99, 0xa1, 0x62, 0x89, 0x18, 0x4f, 0xd5, 0xe3, 0x33, 0x43,
	0x32, 0x43, 0x8c, 0xa7, 0xf2, 0xe9, 0x99, 0xbe, 0x0e, 0x47, 0x6e, 0x62, 0xda, 0x91, 0x5e, 0x8b,
	0x71, 0xb6, 0x96, 0xce, 0xf1, 0x00, 0xce, 0x1d, 0x84, 0x4d, 0xb0, 0xaa, 0x7f, 0xa6, 0xc1, 0xd1,
	0x55, 0x9b, 0xd0, 0x97, 0x92, 0x18, 0x27, 0x21, 0x13, 0x58, 0x75, 0x5c, 0x26, 0xf6, 0x87, 0xc2,
	0x83, 0xc3, 0xe6, 0x18, 0x1b, 0x28, 0xd9, 0x1f, 0x62, 0xf4, 0x2a, 0x00, 0x9f, 0xa4, 0xfe, 0x16,
	0xf6, 0x78, 0x38, 0x64, 0x4c, 0x2e, 0xbe, 0xc9, 0x06, 0xd0, 0x1c, 0x8c, 0x3c, 0xb2, 0x1d, 0x8a,
	0x43, 0x4e, 0x6f, 0xc6, 0x94, 0x6f, 0xfa, 0x0e, 0xcc, 0xb6, 0xe3, 0x95, 0x41, 0xff, 0x5d, 0x18,
	0x53, 0xfe, 0xea, 0x23, 0xea, 0x23, 0x59, 0x74, 0x0e, 0xa6, 0x3d, 0xbc, 0x4b, 0xcb, 0x31, 0x2c,
	0x82, 0xeb, 0x49, 0x36, 0xbc, 0xa1, 0xf0, 0xe8, 0x26, 0xcc, 0xde, 0xc0, 0x0e, 0xee, 0x3a, 0xdb,
	0x06, 0x71, 0xfe, 0xd7, 0x43, 0x70, 0x64, 0x65, 0x17, 0x57, 0x1b, 0x14, 0x97, 0xb6, 0x1d, 0xa5,
	0xf1, 0x7a, 0x3c, 0x7d, 0x5f, 0x44, 0xa9, 0x5a, 0x86, 0x6e, 0xc1, 0x78, 0xec, 0xec, 0x96, 0x67,
	0x51, 0x52, 0xbe, 0x6e, 0xb6, 0xa4, 0x4a, 0xd8, 0xc1, 0x55, 0xea, 0x87, 0x66, 0x7c, 0x29, 0x3a,
	0x06, 0x69, 0xb2, 0xed, 0x08, 0x76, 0xc4, 0x49, 0xc4, 0xde, 0x91, 0x01, 0x23, 0x81, 0x15, 0x5a,
	0x2e, 0x91, 0xb9, 0x77, 0xbc, 0x2b, 0xf7, 0x4a, 0xfc, 0xe6, 0x33, 0xa5, 0x18, 0xba, 0x0f, 0xe3,
	0xfc, 0xa9, 0xcc, 0xae, 0x0b, 0x32, 0x3f, 0xcc, 0x09, 0xba, 0x92, 0x80, 0xa8, 0xcb, 0x1d, 0x85,
	0x0d, 0xb6, 0x6e, 0x93, 0x2d, 0x13, 0xe7, 0x09, 0x04, 0xd1, 0x00, 0x3a, 0x0b, 0x13, 0xec, 0x22,
	0x73, 0x15, 0x73, 0x23, 0x39, 0x6d, 0x61, 0xc2, 0x1c, 0x17, 0x63, 0x22, 0x8e, 0xd6, 0x00, 0xb6,
	0x1b, 0x38, 0x6c, 0x96, 0x5d, 0xbf, 0x86, 0xe7, 0x47, 0x73, 0xda, 0xc2, 0xd4, 0xe5, 0x42, 0x5f,
	0x1b, 0xdf, 0x63, 0xcb, 0xd6, 0xfc, 0x1a, 0x36, 0x33, 0xdb, 0xea, 0x11, 0x9d, 0x87, 0xe9, 0xc0,
	0x0a, 0xa9, 0x4d, 0xf9, 0x69, 0xcf, 0x37, 0x1d, 0xe3, 0x9b, 0x4e, 0x45, 0xc3, 0x62, 0xdf, 0x59,
	0x18, 0x26, 0x78, 0xdb, 0xf3, 0xe7, 0x33, 0x39, 0x6d, 0x21, 0x6d, 0x8a, 0x97, 0xec, 0x03, 0x98,
	0xee, 0xb0, 0x27, 0xe1, 0xd0, 0xb9, 0x18, 0x3f, 0x74, 0x62, 0xce, 0x8d, 0x13, 0xd7, 0x0c, 0x70,
	0xec, 0x34, 0xd2, 0x0b, 0x90, 0x89, 0xe0, 0x22, 0x80, 0x91, 0xbb, 0xeb, 0xe6, 0x5a, 0x71, 0x75,
	0xe6, 0x15, 0x34, 0x06, 0x43, 0x1b, 0xab, 0xc5, 0xbb, 0x33, 0x1a, 0x1a, 0x87, 0xd1, 0x0d, 0x73,
	0xfd, 0xbd, 0xdb, 0xab, 0x2b, 0x33, 0x29, 0xfd, 0x4f, 0x43, 0x30, 0x27, 0x2d, 0xe6, 0x37, 0xc9,
	0x0d, 0xf7, 0x10, 0xc3, 0x6f, 0x6d, 0x80, 0xf0, 0x13, 0x41, 0xd6, 0x16, 0x83, 0x3f, 0x06, 0x60,
	0xd5, 0x10, 0x76, 0xb1, 0x47, 0xc9, 0x7c, 0x9a, 0x87, 0xce, 0x5b, 0xfb, 0x33, 0xd8, 0x61, 0x4f,
	0xa1, 0xa4, 0xd6, 0x0a, 0xd5, 0x31, 0x5d, 0xe8, 0x84, 0xe2, 0x88, 0x45, 0x71, 0x5a, 0xcc, 0x4b,
	0xa2, 0x7e, 0x99, 0x82, 0x4c, 0xb4, 0x92, 0x71, 0xc4, 0xd2, 0x40, 0x72, 0xd4, 0x9e, 0x01, 0xa9,
	0xfe, 0x32, 0xa0, 0xd6, 0x9e, 0x01, 0xc2, 0x8c, 0xe5, 0x6f, 0x60, 0x46, 0xaf, 0x84, 0x78, 0x69,
	0xf1, 0xf5, 0x91, 0x06, 0xc7, 0xbb, 0x80, 0xc9, 0x93, 0xf7, 0x5d, 0x18, 0x6f, 0x55, 0x93, 0xea,
	0xf0, 0x3d, 0x95, 0xa0, 0xd4, 0xe4, 0x52, 0x25, 0x4c, 0x4d, 0x08, 0xd5, 0x23, 0x41, 0x79, 0x18,
	0x11, 0xc5, 0xae, 0x84, 0x83, 0xd4, 0xca, 0x30, 0xa8, 0x72, 0xab, 0x1b, 0xc4, 0x94, 0x12, 0xfa,
	0x16, 0xcc, 0x6c, 0xa8, 0x34, 0x5b, 0x0f, 0x78, 0xb1, 0x8f, 0xde, 0x84, 0xd9, 0x56, 0x46, 0xb2,
	0x9b, 0xa6, 0x5c, 0x69, 0x52, 0x4c, 0xb8, 0xc1, 0x69, 0x13, 0x45, 0x73, 0xec, 0xd2, 0x59, 0x62,
	0x33, 0xe8, 0x0d, 0x98, 0x72, 0xad, 0xdd, 0x72, 0x34, 0x23, 0x76, 0x4e, 0x9b, 0x93, 0xae, 0xb5,
	0x1b, 0xa9, 0x27, 0xfa, 0xbf, 0xd3, 0x70, 0x2c, 0x7a, 0xe5, 0xd9, 0xf5, 0x6d, 0x3e, 0xa1, 0x7f,
	0x92, 0x74, 0x42, 0x5f, 0x4d, 0x40, 0x94, 0xe8, 0x92, 0x9e, 0xa7, 0xf4, 0x06, 0x1c, 0x69, 0x31,
	0xe4, 0x0b, 0xda, 0xf8, 0x51, 0x3d, 0x7e, 0xf9, 0xb5, 0x5e, 0x1b, 0x48, 0x86, 0xcd, 0x99, 0xa0,
	0x63, 0xe4, 0xa5, 0x85, 0xf9, 0x7f, 0x52, 0x30, 0x1b, 0x6d, 0x6f, 0x62, 0xab, 0xf6, 0xff, 0xc8,
	0xf8, 0x09, 0x18, 0xa6, 0x56, 0xc5, 0xc1, 0x71, 0xce, 0xc5, 0x08, 0xbb, 0x74, 0x6c, 0xaf, 0x86,
	0x77, 0x65, 0xcd, 0x24, 0x5e, 0xd0, 0x3c, 0x8c, 0x56, 0x7d, 0xa7, 0xe1, 0x7a, 0x82, 0xd6, 0x8c,
	0xa9, 0x5e, 0xd1, 0xf7, 0x60, 0x74, 0x0b, 0x37, 0x59, 0xde, 0x4a, 0x3e, 0x4e, 0x24, 0x00, 0xba,
	0x83, 0x9b, 0x25, 0x2c, 0x4f, 0xcf, 0x91, 0x2d, 0xfe, 0x92, 0x4c, 0x69, 0x66, 0x00, 0x4a, 0xf5,
	0x2b, 0x90, 0x89, 0xa4, 0x92, 0x6e, 0x59, 0x2d, 0xe9, 0x96, 0xd5, 0x3f, 0xd1, 0xe0, 0x48, 0x8c,
	0x30, 0x79, 0x22, 0x5d, 0x63, 0xa5, 0x65, 0x94, 0xdc, 0xfb, 0x1f, 0x48, 0xad, 0x95, 0x31, 0x79,
	0x74, 0x3d, 0x89, 0xa9, 0xd3, 0xbd, 0x99, 0x6a, 0x63, 0x48, 0xff, 0x28, 0x0d, 0xe3, 0xdf, 0x9a,
	0xe8, 0x79, 0xb5, 0x23, 0x7a, 0xc4, 0x92, 0xc1, 0x43, 0x68, 0x16, 0x86, 0x1d, 0xdb, 0xb5, 0x29,
	0xaf, 0x9f, 0xd2, 0xa6, 0x78, 0xe9, 0xaa, 0xe8, 0x32, 0xdd, 0x15, 0x5d, 0x42, 0x70, 0x40, 0x62,
	0x70, 0xfc, 0x56, 0x83, 0xe3, 0x4b, 0xb8, 0x6e, 0x7b, 0x71, 0xa2, 0x0e, 0x8d, 0x92, 0x1b, 0x30,
	0xaa, 0x02, 0x5f, 0xd0, 0xf1, 0x46, 0x6f, 0x3a, 0x64, 0xa0, 0x4b, 0xf7, 0xc9, 0xa5, 0xfa, 0xa7,
	0x29, 0x98, 0x5c, 0xf6, 0x5d, 0xd7, 0xa6, 0x87, 0x87, 0xec, 0x3c, 0x4c, 0xc5, 0x18, 0x2f, 0xdb,
	0x35, 0x0e, 0x70, 0xe2, 0xd6, 0x2b, 0xe6, 0x64, 0x6c, 0xfc, 0x76, 0x0d, 0xfd, 0x0c, 0xe6, 0x88,
	0xed, 0xd5, 0x1d, 0x2c, 0xbe, 0xa5, 0x63, 0x01, 0x96, 0x7e, 0x01, 0x8b, 0x6e, 0xbd, 0x62, 0xce,
	0x0a, 0x35, 0xec, 0xb3, 0x3a, 0x16, 0x6a, 0x6f, 0x43, 0x46, 0x35, 0xa2, 0xd8, 0x35, 0xc4, 0xb2,
	0xf0, 0x64, 0x82, 0xc6, 0x35, 0x29, 0x63, 0xb6, 0xa4, 0x97, 0x26, 0xdb, 0xe2, 0x5d, 0xff, 0x11,
	0x4c, 0x29, 0x27, 0xc9, 0x14, 0x5f, 0x81, 0x99, 0x2a, 0x1f, 0x29, 0x47, 0x5d, 0x34, 0xee, 0xae,
	0xde, 0x7d, 0x80, 0x69, 0xb1, 0x26, 0x1a, 0xd0, 0x7f, 0x0e, 0xd3, 0xa6, 0xef, 0x38, 0x15, 0xab,
	0xba, 0x75, 0x78, 0xfe, 0xcf, 0x27, 0xfb, 0x5f, 0x30, 0xdf, 0x4e, 0xc1, 0xe5, 0xcf, 0xe7, 0x60,
	0xb4, 0x24, 0xf4, 0xa2, 0xdf, 0x6b, 0x30, 0xd9, 0xd6, 0xd2, 0x41, 0xe7, 0x13, 0xdc, 0x95, 0xd4,
	0x61, 0xcb, 0xf6, 0xf8, 0xd6, 0xd5, 0x37, 0xf6, 0x8a, 0xd1, 0x27, 0xf9, 0x2f, 0xfe, 0xf1, 0xcf,
	0x4f, 0x52, 0x3f, 0xd0, 0x17, 0x8d, 0x9d, 0x4b, 0xad, 0x4e, 0xc3, 0xbb, 0x51, 0x97, 0x22, 0x1f,
	0xeb, 0x4e, 0xe4, 0x63, 0x4d, 0x89, 0x7c, 0xab, 0x19, 0xb1, 0xa8, 0xe5, 0xd1, 0x73, 0x0d, 0x8e,
	0x26, 0x34, 0xa0, 0xd0, 0xc5, 0x04, 0x14, 0xfb, 0x37, 0xdf, 0xb2, 0x85, 0x7e, 0xc5, 0x05, 0xe7,
	0xfa, 0x07, 0x7b, 0xc5, 0x39, 0x85, 0xe9, 0x42, 0x5b, 0xab, 0x8c, 0x9b, 0x75, 0x47, 0x7f, 0x6f,
	0x00, 0xb3, 0x2a, 0xad, 0x8d, 0x99, 0x89, 0xbf, 0xd2, 0x00, 0x5a, 0x9d, 0x16, 0xf4, 0x7a, 0x02,
	0xd4, 0xae, 0x46, 0x4c, 0x4f, 0x16, 0x6e, 0xee, 0x15, 0xf9, 0x47, 0x3f, 0x87, 0xba, 0x88, 0xae,
	0x72, 0xa8, 0x6c, 0xa0, 0x0f, 0x98, 0xad, 0x4e, 0x50, 0xfe, 0x19, 0xfa, 0x4c, 0x83, 0x89, 0x78,
	0x07, 0x04, 0x25, 0x5d, 0x03, 0x09, 0x2d, 0x9d, 0xec, 0xf9, 0x03, 0xe5, 0xa4, 0x9f, 0xd7, 0x3a,
	0x03, 0xe6, 0x1a, 0x1a, 0x20, 0x60, 0xd0, 0xaf, 0x35, 0x98, 0x6c, 0x6b, 0x9d, 0x24, 0x86, 0x75,
	0x52, 0x73, 0x25, 0x3b, 0xd7, 0x95, 0xcb, 0x2b, 0x6e, 0x40, 0x9b, 0x1d, 0xce, 0xcc, 0x7f, 0x73,
	0x67, 0x7e, 0xaa, 0x01, 0xb4, 0xbe, 0xfc, 0x13, 0x69, 0xee, 0x6a, 0x0c, 0x64, 0x7b, 0x7e, 0xdb,
	0xe8, 0xf7, 0xda, 0x82, 0x51, 0x6e, 0xf6, 0x82, 0xb8, 0x16, 0x71, 0xb4, 0x29, 0x0b, 0xc6, 0xbf,
	0x69, 0x70, 0x54, 0xc1, 0xa0, 0x21, 0xb6, 0x5c, 0xdb, 0xab, 0xf7, 0x0f, 0x77, 0xdf, 0x82, 0xcc,
	0x72, 0x5a, 0xa8, 0xdf, 0xe7, 0xa8, 0x37, 0xf5, 0xf5, 0xc3, 0x40, 0x1d, 0xc3, 0xb8, 0xa8, 0xe5,
	0xdf, 0xd4, 0xd0, 0xe7, 0x1a, 0x4c, 0x77, 0x7c, 0x3e, 0xa2, 0xef, 0xf4, 0xfd, 0xed, 0x9b, 0xcd,
	0xf7, 0x23, 0x2a, 0x83, 0xf7, 0x01, 0x37, 0x64, 0x43, 0xbf, 0x73, 0x08, 0x86, 0x28, 0xe5, 0x8c,
	0x83, 0x8f, 0x35, 0x18, 0x62, 0x35, 0x1d, 0x3a, 0x9d, 0xc8, 0x7e, 0x54, 0xec, 0x1d, 0x10, 0x1d,
	0x77, 0x38, 0xbc, 0x15, 0xfd, 0xfa, 0x20, 0xf0, 0x42, 0x6c, 0xd5, 0x18, 0xa6, 0x3f, 0x6a, 0x30,
	0x19, 0x39, 0xbb, 0x2f, 0x70, 0x7d, 0xc5, 0xc2, 0x26, 0xc7, 0x78, 0x57, 0xbf, 0x3d, 0x08, 0x46,
	0x12, 0xc7, 0x25, 0xa2, 0xe0, 0xef, 0x1a, 0xcc, 0x74, 0xd6, 0x63, 0x28, 0x89, 0xdb, 0x7d, 0x8a,
	0xb6, 0xec, 0x01, 0x45, 0xb8, 0x6e, 0xef, 0x15, 0xd5, 0x5f, 0x95, 0x0b, 0xb2, 0xc0, 0xe2, 0xa6,
	0xdc, 0xd3, 0x57, 0x07, 0x31, 0xa5, 0xd2, 0x01, 0x87, 0xb9, 0xfe, 0x5f, 0x1a, 0x8c, 0x88, 0x92,
	0x04, 0xe5, 0x92, 0x2e, 0xe9, 0x78, 0x49, 0x97, 0x3d, 0xdb, 0x43, 0x42, 0x86, 0xed, 0x6f, 0xb4,
	0xbd, 0x62, 0x4e, 0x61, 0x6f, 0xaf, 0x1e, 0x2e, 0x44, 0x65, 0xd1, 0x5e, 0x71, 0x41, 0x89, 0x24,
	0xd7, 0x6d, 0x2d, 0x51, 0x6e, 0xf7, 0x4d, 0x7d, 0x69, 0x10, 0xbb, 0x45, 0xb1, 0xc4, 0xac, 0xfd,
	0xb3, 0x06, 0x63, 0xaa, 0x4e, 0x42, 0x7a, 0x52, 0x8c, 0xb5, 0x17, 0x51, 0xfb, 0x1e, 0xdc, 0x8f,
	0xf7, 0x8a, 0x73, 0xc9, 0x56, 0x72, 0xc0, 0xb7, 0xf5, 0x1b, 0x03, 0xe5, 0x85, 0x44, 0xc1, 0x20,
	0xff, 0x55, 0x83, 0xa9, 0xf6, 0x56, 0x05, 0x5a, 0xe8, 0xb7, 0x9b, 0x91, 0x7d, 0xbd, 0xe7, 0xc7,
	0xa2, 0xe2, 0xec, 0x3e, 0xc7, 0xbc, 0xae, 0xff, 0x70, 0x10, 0xcc, 0x41, 0x1b, 0x00, 0x86, 0xfc,
	0x2f, 0x1a, 0x4c, 0xb6, 0x35, 0x21, 0x12, 0xef, 0xcb, 0xa4, 0x36, 0x45, 0x9f, 0xb8, 0x0f, 0x25,
	0xbf, 0x83, 0xf8, 0xfe, 0x8b, 0x5a, 0x3e, 0xfb, 0xe4, 0x8b, 0xe2, 0x5c, 0x72, 0x85, 0xfc, 0x65,
	0xf1, 0xa7, 0x8f, 0x29, 0x0d, 0xc8, 0xa2, 0x61, 0x3c, 0x79, 0xf2, 0xa4, 0xb3, 0x7c, 0xb6, 0x1a,
	0xf4, 0xb1, 0x51, 0x75, 0xfc, 0x46, 0xed, 0x62, 0xe0, 0x58, 0xf4, 0x91, 0x1f, 0xba, 0x17, 0x0e,
	0x12, 0x57, 0xfb, 0x30, 0x70, 0x4b, 0x5f, 0x6b, 0x5f, 0x15, 0xcb, 0x07, 0xfe, 0x96, 0x42, 0xd7,
	0x06, 0xf9, 0x17, 0x07, 0xc7, 0xaa, 0xbe, 0xdb, 0xed, 0xdc, 0xa5, 0x09, 0x59, 0xbb, 0x6f, 0xb0,
	0x30, 0xdf, 0xd0, 0xde, 0xbf, 0x2a, 0x45, 0xea, 0xbe, 0x63, 0x79, 0xf5, 0x82, 0x1f, 0xd6, 0x8d,
	0x3a, 0xf6, 0x78, 0x12, 0x18, 0x2d, 0x60, 0xb1, 0xff, 0xe7, 0xef, 0xc8, 0xc7, 0x3f, 0xa4, 0x8e,
	0xdf, 0x14, 0x4b, 0x97, 0x99, 0x2b, 0x0a, 0x52, 0x6f, 0xe1, 0xc1, 0xa5, 0x2f, 0xd4, 0xcc, 0x43,
	0x3e, 0xf3, 0x50, 0xce, 0x3c, 0x7c, 0x70, 0xa9, 0x32, 0xc2, 0x15, 0xbf, 0xf5, 0xbf, 0x00, 0x00,
	0x00, 0xff, 0xff, 0x3a, 0xa2, 0xd9, 0xa5, 0x3c, 0x21, 0x00, 0x00,
}
| |
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// A compilation error here means the grpc module is older than the
// code generator that produced this file.
const _ = grpc.SupportPackageIsVersion6
| |
// SpannerClient is the client API for Spanner service.
//
// Use NewSpannerClient to construct an implementation from a
// grpc.ClientConnInterface.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SpannerClient interface {
	// Creates a new session. A session can be used to perform
	// transactions that read and/or modify data in a Cloud Spanner database.
	// Sessions are meant to be reused for many consecutive
	// transactions.
	//
	// Sessions can only execute one transaction at a time. To execute
	// multiple concurrent read-write/write-only transactions, create
	// multiple sessions. Note that standalone reads and queries use a
	// transaction internally, and count toward the one transaction
	// limit.
	//
	// Active sessions use additional server resources, so it is a good idea to
	// delete idle and unneeded sessions.
	// Aside from explicit deletes, Cloud Spanner can delete sessions for which no
	// operations are sent for more than an hour. If a session is deleted,
	// requests to it return `NOT_FOUND`.
	//
	// Idle sessions can be kept alive by sending a trivial SQL query
	// periodically, e.g., `"SELECT 1"`.
	CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error)
	// Creates multiple new sessions.
	//
	// This API can be used to initialize a session cache on the clients.
	// See https://goo.gl/TgSFN2 for best practices on session cache management.
	BatchCreateSessions(ctx context.Context, in *BatchCreateSessionsRequest, opts ...grpc.CallOption) (*BatchCreateSessionsResponse, error)
	// Gets a session. Returns `NOT_FOUND` if the session does not exist.
	// This is mainly useful for determining whether a session is still
	// alive.
	GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error)
	// Lists all sessions in a given database.
	ListSessions(ctx context.Context, in *ListSessionsRequest, opts ...grpc.CallOption) (*ListSessionsResponse, error)
	// Ends a session, releasing server resources associated with it. This will
	// asynchronously trigger cancellation of any operations that are running with
	// this session.
	DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Executes an SQL statement, returning all results in a single reply. This
	// method cannot be used to return a result set larger than 10 MiB;
	// if the query yields more data than that, the query fails with
	// a `FAILED_PRECONDITION` error.
	//
	// Operations inside read-write transactions might return `ABORTED`. If
	// this occurs, the application should restart the transaction from
	// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
	// details.
	//
	// Larger result sets can be fetched in streaming fashion by calling
	// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
	// instead.
	ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error)
	// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
	// result set as a stream. Unlike
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
	// the size of the returned result set. However, no individual row in the
	// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
	ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error)
	// Executes a batch of SQL DML statements. This method allows many statements
	// to be run with lower latency than submitting them sequentially with
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
	//
	// Statements are executed in sequential order. A request can succeed even if
	// a statement fails. The
	// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
	// field in the response provides information about the statement that failed.
	// Clients must inspect this field to determine whether an error occurred.
	//
	// Execution stops after the first failed statement; the remaining statements
	// are not executed.
	ExecuteBatchDml(ctx context.Context, in *ExecuteBatchDmlRequest, opts ...grpc.CallOption) (*ExecuteBatchDmlResponse, error)
	// Reads rows from the database using key lookups and scans, as a
	// simple key/value style alternative to
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
	// used to return a result set larger than 10 MiB; if the read matches more
	// data than that, the read fails with a `FAILED_PRECONDITION`
	// error.
	//
	// Reads inside read-write transactions might return `ABORTED`. If
	// this occurs, the application should restart the transaction from
	// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
	// details.
	//
	// Larger result sets can be yielded in streaming fashion by calling
	// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
	Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error)
	// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
	// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
	// limit on the size of the returned result set. However, no individual row in
	// the result set can exceed 100 MiB, and no column value can exceed
	// 10 MiB.
	StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error)
	// Begins a new transaction. This step can often be skipped:
	// [Read][google.spanner.v1.Spanner.Read],
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
	// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
	// side-effect.
	BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error)
	// Commits a transaction. The request includes the mutations to be
	// applied to rows in the database.
	//
	// `Commit` might return an `ABORTED` error. This can occur at any time;
	// commonly, the cause is conflicts with concurrent
	// transactions. However, it can also happen for a variety of other
	// reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
	// the transaction from the beginning, re-using the same session.
	Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error)
	// Rolls back a transaction, releasing any locks it holds. It is a good
	// idea to call this for any transaction that includes one or more
	// [Read][google.spanner.v1.Spanner.Read] or
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
	// decides not to commit.
	//
	// `Rollback` returns `OK` if it successfully aborts the transaction, the
	// transaction was already aborted, or the transaction is not
	// found. `Rollback` never returns `ABORTED`.
	Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Creates a set of partition tokens that can be used to execute a query
	// operation in parallel. Each of the returned partition tokens can be used
	// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
	// specify a subset of the query result to read. The same session and
	// read-only transaction must be used by the PartitionQueryRequest used to
	// create the partition tokens and the ExecuteSqlRequests that use the
	// partition tokens.
	//
	// Partition tokens become invalid when the session used to create them
	// is deleted, is idle for too long, begins a new transaction, or becomes too
	// old. When any of these happen, it is not possible to resume the query, and
	// the whole operation must be restarted from the beginning.
	PartitionQuery(ctx context.Context, in *PartitionQueryRequest, opts ...grpc.CallOption) (*PartitionResponse, error)
	// Creates a set of partition tokens that can be used to execute a read
	// operation in parallel. Each of the returned partition tokens can be used
	// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
	// subset of the read result to read. The same session and read-only
	// transaction must be used by the PartitionReadRequest used to create the
	// partition tokens and the ReadRequests that use the partition tokens. There
	// are no ordering guarantees on rows returned among the returned partition
	// tokens, or even within each individual StreamingRead call issued with a
	// partition_token.
	//
	// Partition tokens become invalid when the session used to create them
	// is deleted, is idle for too long, begins a new transaction, or becomes too
	// old. When any of these happen, it is not possible to resume the read, and
	// the whole operation must be restarted from the beginning.
	PartitionRead(ctx context.Context, in *PartitionReadRequest, opts ...grpc.CallOption) (*PartitionResponse, error)
}
| |
// spannerClient is the concrete SpannerClient implementation; it issues
// every RPC over the wrapped connection.
type spannerClient struct {
	cc grpc.ClientConnInterface
}
| |
| func NewSpannerClient(cc grpc.ClientConnInterface) SpannerClient { |
| return &spannerClient{cc} |
| } |
| |
| func (c *spannerClient) CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error) { |
| out := new(Session) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/CreateSession", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) BatchCreateSessions(ctx context.Context, in *BatchCreateSessionsRequest, opts ...grpc.CallOption) (*BatchCreateSessionsResponse, error) { |
| out := new(BatchCreateSessionsResponse) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/BatchCreateSessions", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error) { |
| out := new(Session) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/GetSession", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) ListSessions(ctx context.Context, in *ListSessionsRequest, opts ...grpc.CallOption) (*ListSessionsResponse, error) { |
| out := new(ListSessionsResponse) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/ListSessions", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*empty.Empty, error) { |
| out := new(empty.Empty) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/DeleteSession", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error) { |
| out := new(ResultSet) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/ExecuteSql", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error) { |
| stream, err := c.cc.NewStream(ctx, &_Spanner_serviceDesc.Streams[0], "/google.spanner.v1.Spanner/ExecuteStreamingSql", opts...) |
| if err != nil { |
| return nil, err |
| } |
| x := &spannerExecuteStreamingSqlClient{stream} |
| if err := x.ClientStream.SendMsg(in); err != nil { |
| return nil, err |
| } |
| if err := x.ClientStream.CloseSend(); err != nil { |
| return nil, err |
| } |
| return x, nil |
| } |
| |
// Spanner_ExecuteStreamingSqlClient is the client-side handle for the
// server-streaming ExecuteStreamingSql RPC; Recv returns successive
// PartialResultSet messages until the stream ends.
type Spanner_ExecuteStreamingSqlClient interface {
	Recv() (*PartialResultSet, error)
	grpc.ClientStream
}

// spannerExecuteStreamingSqlClient wraps a raw grpc.ClientStream with the
// typed Recv method required by Spanner_ExecuteStreamingSqlClient.
type spannerExecuteStreamingSqlClient struct {
	grpc.ClientStream
}

// Recv reads and decodes the next PartialResultSet from the stream.
func (x *spannerExecuteStreamingSqlClient) Recv() (*PartialResultSet, error) {
	m := new(PartialResultSet)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
| |
| func (c *spannerClient) ExecuteBatchDml(ctx context.Context, in *ExecuteBatchDmlRequest, opts ...grpc.CallOption) (*ExecuteBatchDmlResponse, error) { |
| out := new(ExecuteBatchDmlResponse) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/ExecuteBatchDml", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error) { |
| out := new(ResultSet) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/Read", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error) { |
| stream, err := c.cc.NewStream(ctx, &_Spanner_serviceDesc.Streams[1], "/google.spanner.v1.Spanner/StreamingRead", opts...) |
| if err != nil { |
| return nil, err |
| } |
| x := &spannerStreamingReadClient{stream} |
| if err := x.ClientStream.SendMsg(in); err != nil { |
| return nil, err |
| } |
| if err := x.ClientStream.CloseSend(); err != nil { |
| return nil, err |
| } |
| return x, nil |
| } |
| |
// Spanner_StreamingReadClient is the client-side handle for the
// server-streaming StreamingRead RPC; Recv returns successive
// PartialResultSet messages until the stream ends.
type Spanner_StreamingReadClient interface {
	Recv() (*PartialResultSet, error)
	grpc.ClientStream
}

// spannerStreamingReadClient wraps a raw grpc.ClientStream with the typed
// Recv method required by Spanner_StreamingReadClient.
type spannerStreamingReadClient struct {
	grpc.ClientStream
}

// Recv reads and decodes the next PartialResultSet from the stream.
func (x *spannerStreamingReadClient) Recv() (*PartialResultSet, error) {
	m := new(PartialResultSet)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
| |
| func (c *spannerClient) BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error) { |
| out := new(Transaction) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/BeginTransaction", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { |
| out := new(CommitResponse) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/Commit", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*empty.Empty, error) { |
| out := new(empty.Empty) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/Rollback", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) PartitionQuery(ctx context.Context, in *PartitionQueryRequest, opts ...grpc.CallOption) (*PartitionResponse, error) { |
| out := new(PartitionResponse) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/PartitionQuery", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *spannerClient) PartitionRead(ctx context.Context, in *PartitionReadRequest, opts ...grpc.CallOption) (*PartitionResponse, error) { |
| out := new(PartitionResponse) |
| err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/PartitionRead", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
// SpannerServer is the server API for Spanner service.
// Implementations may embed UnimplementedSpannerServer for forward
// compatibility with methods added to the service in the future.
type SpannerServer interface {
	// Creates a new session. A session can be used to perform
	// transactions that read and/or modify data in a Cloud Spanner database.
	// Sessions are meant to be reused for many consecutive
	// transactions.
	//
	// Sessions can only execute one transaction at a time. To execute
	// multiple concurrent read-write/write-only transactions, create
	// multiple sessions. Note that standalone reads and queries use a
	// transaction internally, and count toward the one transaction
	// limit.
	//
	// Active sessions use additional server resources, so it is a good idea to
	// delete idle and unneeded sessions.
	// Aside from explicit deletes, Cloud Spanner can delete sessions for which no
	// operations are sent for more than an hour. If a session is deleted,
	// requests to it return `NOT_FOUND`.
	//
	// Idle sessions can be kept alive by sending a trivial SQL query
	// periodically, e.g., `"SELECT 1"`.
	CreateSession(context.Context, *CreateSessionRequest) (*Session, error)
	// Creates multiple new sessions.
	//
	// This API can be used to initialize a session cache on the clients.
	// See https://goo.gl/TgSFN2 for best practices on session cache management.
	BatchCreateSessions(context.Context, *BatchCreateSessionsRequest) (*BatchCreateSessionsResponse, error)
	// Gets a session. Returns `NOT_FOUND` if the session does not exist.
	// This is mainly useful for determining whether a session is still
	// alive.
	GetSession(context.Context, *GetSessionRequest) (*Session, error)
	// Lists all sessions in a given database.
	ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error)
	// Ends a session, releasing server resources associated with it. This will
	// asynchronously trigger cancellation of any operations that are running with
	// this session.
	DeleteSession(context.Context, *DeleteSessionRequest) (*empty.Empty, error)
	// Executes an SQL statement, returning all results in a single reply. This
	// method cannot be used to return a result set larger than 10 MiB;
	// if the query yields more data than that, the query fails with
	// a `FAILED_PRECONDITION` error.
	//
	// Operations inside read-write transactions might return `ABORTED`. If
	// this occurs, the application should restart the transaction from
	// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
	// details.
	//
	// Larger result sets can be fetched in streaming fashion by calling
	// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
	// instead.
	ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error)
	// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
	// result set as a stream. Unlike
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
	// the size of the returned result set. However, no individual row in the
	// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
	ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error
	// Executes a batch of SQL DML statements. This method allows many statements
	// to be run with lower latency than submitting them sequentially with
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
	//
	// Statements are executed in sequential order. A request can succeed even if
	// a statement fails. The
	// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
	// field in the response provides information about the statement that failed.
	// Clients must inspect this field to determine whether an error occurred.
	//
	// Execution stops after the first failed statement; the remaining statements
	// are not executed.
	ExecuteBatchDml(context.Context, *ExecuteBatchDmlRequest) (*ExecuteBatchDmlResponse, error)
	// Reads rows from the database using key lookups and scans, as a
	// simple key/value style alternative to
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
	// used to return a result set larger than 10 MiB; if the read matches more
	// data than that, the read fails with a `FAILED_PRECONDITION`
	// error.
	//
	// Reads inside read-write transactions might return `ABORTED`. If
	// this occurs, the application should restart the transaction from
	// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
	// details.
	//
	// Larger result sets can be yielded in streaming fashion by calling
	// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
	Read(context.Context, *ReadRequest) (*ResultSet, error)
	// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
	// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
	// limit on the size of the returned result set. However, no individual row in
	// the result set can exceed 100 MiB, and no column value can exceed
	// 10 MiB.
	StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error
	// Begins a new transaction. This step can often be skipped:
	// [Read][google.spanner.v1.Spanner.Read],
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
	// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
	// side-effect.
	BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error)
	// Commits a transaction. The request includes the mutations to be
	// applied to rows in the database.
	//
	// `Commit` might return an `ABORTED` error. This can occur at any time;
	// commonly, the cause is conflicts with concurrent
	// transactions. However, it can also happen for a variety of other
	// reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
	// the transaction from the beginning, re-using the same session.
	Commit(context.Context, *CommitRequest) (*CommitResponse, error)
	// Rolls back a transaction, releasing any locks it holds. It is a good
	// idea to call this for any transaction that includes one or more
	// [Read][google.spanner.v1.Spanner.Read] or
	// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
	// decides not to commit.
	//
	// `Rollback` returns `OK` if it successfully aborts the transaction, the
	// transaction was already aborted, or the transaction is not
	// found. `Rollback` never returns `ABORTED`.
	Rollback(context.Context, *RollbackRequest) (*empty.Empty, error)
	// Creates a set of partition tokens that can be used to execute a query
	// operation in parallel. Each of the returned partition tokens can be used
	// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
	// specify a subset of the query result to read. The same session and
	// read-only transaction must be used by the PartitionQueryRequest used to
	// create the partition tokens and the ExecuteSqlRequests that use the
	// partition tokens.
	//
	// Partition tokens become invalid when the session used to create them
	// is deleted, is idle for too long, begins a new transaction, or becomes too
	// old. When any of these happen, it is not possible to resume the query, and
	// the whole operation must be restarted from the beginning.
	PartitionQuery(context.Context, *PartitionQueryRequest) (*PartitionResponse, error)
	// Creates a set of partition tokens that can be used to execute a read
	// operation in parallel. Each of the returned partition tokens can be used
	// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
	// subset of the read result to read. The same session and read-only
	// transaction must be used by the PartitionReadRequest used to create the
	// partition tokens and the ReadRequests that use the partition tokens. There
	// are no ordering guarantees on rows returned among the returned partition
	// tokens, or even within each individual StreamingRead call issued with a
	// partition_token.
	//
	// Partition tokens become invalid when the session used to create them
	// is deleted, is idle for too long, begins a new transaction, or becomes too
	// old. When any of these happen, it is not possible to resume the read, and
	// the whole operation must be restarted from the beginning.
	PartitionRead(context.Context, *PartitionReadRequest) (*PartitionResponse, error)
}
| |
| // UnimplementedSpannerServer can be embedded to have forward compatible implementations. |
| type UnimplementedSpannerServer struct { |
| } |
| |
| func (*UnimplementedSpannerServer) CreateSession(ctx context.Context, req *CreateSessionRequest) (*Session, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method CreateSession not implemented") |
| } |
| func (*UnimplementedSpannerServer) BatchCreateSessions(ctx context.Context, req *BatchCreateSessionsRequest) (*BatchCreateSessionsResponse, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method BatchCreateSessions not implemented") |
| } |
| func (*UnimplementedSpannerServer) GetSession(ctx context.Context, req *GetSessionRequest) (*Session, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method GetSession not implemented") |
| } |
| func (*UnimplementedSpannerServer) ListSessions(ctx context.Context, req *ListSessionsRequest) (*ListSessionsResponse, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method ListSessions not implemented") |
| } |
| func (*UnimplementedSpannerServer) DeleteSession(ctx context.Context, req *DeleteSessionRequest) (*empty.Empty, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method DeleteSession not implemented") |
| } |
| func (*UnimplementedSpannerServer) ExecuteSql(ctx context.Context, req *ExecuteSqlRequest) (*ResultSet, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method ExecuteSql not implemented") |
| } |
| func (*UnimplementedSpannerServer) ExecuteStreamingSql(req *ExecuteSqlRequest, srv Spanner_ExecuteStreamingSqlServer) error { |
| return status1.Errorf(codes.Unimplemented, "method ExecuteStreamingSql not implemented") |
| } |
| func (*UnimplementedSpannerServer) ExecuteBatchDml(ctx context.Context, req *ExecuteBatchDmlRequest) (*ExecuteBatchDmlResponse, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method ExecuteBatchDml not implemented") |
| } |
| func (*UnimplementedSpannerServer) Read(ctx context.Context, req *ReadRequest) (*ResultSet, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method Read not implemented") |
| } |
| func (*UnimplementedSpannerServer) StreamingRead(req *ReadRequest, srv Spanner_StreamingReadServer) error { |
| return status1.Errorf(codes.Unimplemented, "method StreamingRead not implemented") |
| } |
| func (*UnimplementedSpannerServer) BeginTransaction(ctx context.Context, req *BeginTransactionRequest) (*Transaction, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method BeginTransaction not implemented") |
| } |
| func (*UnimplementedSpannerServer) Commit(ctx context.Context, req *CommitRequest) (*CommitResponse, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method Commit not implemented") |
| } |
| func (*UnimplementedSpannerServer) Rollback(ctx context.Context, req *RollbackRequest) (*empty.Empty, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method Rollback not implemented") |
| } |
| func (*UnimplementedSpannerServer) PartitionQuery(ctx context.Context, req *PartitionQueryRequest) (*PartitionResponse, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method PartitionQuery not implemented") |
| } |
| func (*UnimplementedSpannerServer) PartitionRead(ctx context.Context, req *PartitionReadRequest) (*PartitionResponse, error) { |
| return nil, status1.Errorf(codes.Unimplemented, "method PartitionRead not implemented") |
| } |
| |
// RegisterSpannerServer registers srv's implementation of the Spanner
// service with the gRPC server s, using the service descriptor
// _Spanner_serviceDesc to route incoming RPCs.
func RegisterSpannerServer(s *grpc.Server, srv SpannerServer) {
	s.RegisterService(&_Spanner_serviceDesc, srv)
}
| |
| func _Spanner_CreateSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(CreateSessionRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).CreateSession(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/CreateSession", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).CreateSession(ctx, req.(*CreateSessionRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_BatchCreateSessions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(BatchCreateSessionsRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).BatchCreateSessions(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/BatchCreateSessions", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).BatchCreateSessions(ctx, req.(*BatchCreateSessionsRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_GetSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(GetSessionRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).GetSession(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/GetSession", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).GetSession(ctx, req.(*GetSessionRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_ListSessions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(ListSessionsRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).ListSessions(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/ListSessions", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).ListSessions(ctx, req.(*ListSessionsRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_DeleteSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(DeleteSessionRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).DeleteSession(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/DeleteSession", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).DeleteSession(ctx, req.(*DeleteSessionRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_ExecuteSql_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(ExecuteSqlRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).ExecuteSql(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/ExecuteSql", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).ExecuteSql(ctx, req.(*ExecuteSqlRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_ExecuteStreamingSql_Handler(srv interface{}, stream grpc.ServerStream) error { |
| m := new(ExecuteSqlRequest) |
| if err := stream.RecvMsg(m); err != nil { |
| return err |
| } |
| return srv.(SpannerServer).ExecuteStreamingSql(m, &spannerExecuteStreamingSqlServer{stream}) |
| } |
| |
// Spanner_ExecuteStreamingSqlServer is the server-side handle for the
// server-streaming ExecuteStreamingSql RPC; Send writes one
// PartialResultSet message to the client.
type Spanner_ExecuteStreamingSqlServer interface {
	Send(*PartialResultSet) error
	grpc.ServerStream
}

// spannerExecuteStreamingSqlServer wraps a raw grpc.ServerStream with the
// typed Send method required by Spanner_ExecuteStreamingSqlServer.
type spannerExecuteStreamingSqlServer struct {
	grpc.ServerStream
}

// Send encodes and writes m to the underlying server stream.
func (x *spannerExecuteStreamingSqlServer) Send(m *PartialResultSet) error {
	return x.ServerStream.SendMsg(m)
}
| |
| func _Spanner_ExecuteBatchDml_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(ExecuteBatchDmlRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).ExecuteBatchDml(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/ExecuteBatchDml", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).ExecuteBatchDml(ctx, req.(*ExecuteBatchDmlRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(ReadRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).Read(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/Read", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).Read(ctx, req.(*ReadRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_StreamingRead_Handler(srv interface{}, stream grpc.ServerStream) error { |
| m := new(ReadRequest) |
| if err := stream.RecvMsg(m); err != nil { |
| return err |
| } |
| return srv.(SpannerServer).StreamingRead(m, &spannerStreamingReadServer{stream}) |
| } |
| |
// Spanner_StreamingReadServer is the server-side handle for the
// server-streaming StreamingRead RPC; Send writes one PartialResultSet
// message to the client.
type Spanner_StreamingReadServer interface {
	Send(*PartialResultSet) error
	grpc.ServerStream
}

// spannerStreamingReadServer wraps a raw grpc.ServerStream with the typed
// Send method required by Spanner_StreamingReadServer.
type spannerStreamingReadServer struct {
	grpc.ServerStream
}

// Send encodes and writes m to the underlying server stream.
func (x *spannerStreamingReadServer) Send(m *PartialResultSet) error {
	return x.ServerStream.SendMsg(m)
}
| |
| func _Spanner_BeginTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(BeginTransactionRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).BeginTransaction(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/BeginTransaction", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).BeginTransaction(ctx, req.(*BeginTransactionRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(CommitRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).Commit(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/Commit", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).Commit(ctx, req.(*CommitRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_Rollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(RollbackRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).Rollback(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/Rollback", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).Rollback(ctx, req.(*RollbackRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_PartitionQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(PartitionQueryRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).PartitionQuery(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/PartitionQuery", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).PartitionQuery(ctx, req.(*PartitionQueryRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Spanner_PartitionRead_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(PartitionReadRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpannerServer).PartitionRead(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.spanner.v1.Spanner/PartitionRead", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpannerServer).PartitionRead(ctx, req.(*PartitionReadRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
// _Spanner_serviceDesc describes the Spanner service to the gRPC runtime:
// the fully-qualified service name, the server interface type, and the
// handler for every unary method and server-streaming method. It is passed
// to s.RegisterService by RegisterSpannerServer, and its Streams entries are
// referenced by index from the streaming client methods.
var _Spanner_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.spanner.v1.Spanner",
	HandlerType: (*SpannerServer)(nil),
	// Unary methods.
	Methods: []grpc.MethodDesc{
		{
			MethodName: "CreateSession",
			Handler:    _Spanner_CreateSession_Handler,
		},
		{
			MethodName: "BatchCreateSessions",
			Handler:    _Spanner_BatchCreateSessions_Handler,
		},
		{
			MethodName: "GetSession",
			Handler:    _Spanner_GetSession_Handler,
		},
		{
			MethodName: "ListSessions",
			Handler:    _Spanner_ListSessions_Handler,
		},
		{
			MethodName: "DeleteSession",
			Handler:    _Spanner_DeleteSession_Handler,
		},
		{
			MethodName: "ExecuteSql",
			Handler:    _Spanner_ExecuteSql_Handler,
		},
		{
			MethodName: "ExecuteBatchDml",
			Handler:    _Spanner_ExecuteBatchDml_Handler,
		},
		{
			MethodName: "Read",
			Handler:    _Spanner_Read_Handler,
		},
		{
			MethodName: "BeginTransaction",
			Handler:    _Spanner_BeginTransaction_Handler,
		},
		{
			MethodName: "Commit",
			Handler:    _Spanner_Commit_Handler,
		},
		{
			MethodName: "Rollback",
			Handler:    _Spanner_Rollback_Handler,
		},
		{
			MethodName: "PartitionQuery",
			Handler:    _Spanner_PartitionQuery_Handler,
		},
		{
			MethodName: "PartitionRead",
			Handler:    _Spanner_PartitionRead_Handler,
		},
	},
	// Server-streaming methods; index 0 and 1 are used by the client's
	// ExecuteStreamingSql and StreamingRead calls respectively.
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "ExecuteStreamingSql",
			Handler:       _Spanner_ExecuteStreamingSql_Handler,
			ServerStreams: true,
		},
		{
			StreamName:    "StreamingRead",
			Handler:       _Spanner_StreamingRead_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "google/spanner/v1/spanner.proto",
}