| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/cloud/speech/v1p1beta1/cloud_speech.proto |
| |
| package speech |
| |
| import ( |
| context "context" |
| fmt "fmt" |
| math "math" |
| |
| proto "github.com/golang/protobuf/proto" |
| _ "github.com/golang/protobuf/ptypes/any" |
| duration "github.com/golang/protobuf/ptypes/duration" |
| timestamp "github.com/golang/protobuf/ptypes/timestamp" |
| _ "google.golang.org/genproto/googleapis/api/annotations" |
| longrunning "google.golang.org/genproto/googleapis/longrunning" |
| status "google.golang.org/genproto/googleapis/rpc/status" |
| grpc "google.golang.org/grpc" |
| codes "google.golang.org/grpc/codes" |
| status1 "google.golang.org/grpc/status" |
| ) |
| |
| // Reference imports to suppress errors if they are not otherwise used. |
| var _ = proto.Marshal |
| var _ = fmt.Errorf |
| var _ = math.Inf |
| |
| // This is a compile-time assertion to ensure that this generated file |
| // is compatible with the proto package it is being compiled against. |
| // A compilation error at this line likely means your copy of the |
| // proto package needs to be updated. |
| const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package |
| |
// The encoding of the audio data sent in the request.
//
// All encodings support only 1 channel (mono) audio, unless the
// `audio_channel_count` and `enable_separate_recognition_per_channel` fields
// are set.
//
// For best results, the audio source should be captured and transmitted using
// a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
// recognition can be reduced if lossy codecs are used to capture or transmit
// audio, particularly if background noise is present. Lossy codecs include
// `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, and `MP3`.
//
// The `FLAC` and `WAV` audio file formats include a header that describes the
// included audio content. You can request recognition for `WAV` files that
// contain either `LINEAR16` or `MULAW` encoded audio.
// If you send `FLAC` or `WAV` audio file format in
// your request, you do not need to specify an `AudioEncoding`; the audio
// encoding format is determined from the file header. If you specify
// an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
// encoding configuration must match the encoding described in the audio
// header; otherwise the request returns an
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
// code.
type RecognitionConfig_AudioEncoding int32

const (
	// Not specified.
	RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1
	// `FLAC` (Free Lossless Audio
	// Codec) is the recommended encoding because it is
	// lossless--therefore recognition is not compromised--and
	// requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
	// encoding supports 16-bit and 24-bit samples, however, not all fields in
	// `STREAMINFO` are supported.
	RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2
	// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
	RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3
	// Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
	RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4
	// Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
	RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5
	// Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)).
	// `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
	RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6
	// Although the use of lossy encodings is not recommended, if a very low
	// bitrate encoding is required, `OGG_OPUS` is highly preferred over
	// Speex encoding. The [Speex](https://speex.org/) encoding supported by
	// Cloud Speech API has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`.
	// It is a variant of the RTP Speex encoding defined in
	// [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each block
	// starts with a byte containing the length of the block, in bytes, followed
	// by one or more frames of Speex data, padded to an integral number of
	// bytes (octets) as specified in RFC 5574. In other words, each RTP header
	// is replaced with a single byte containing the block length. Only Speex
	// wideband is supported. `sample_rate_hertz` must be 16000.
	RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7
	// MP3 audio. Support all standard MP3 bitrates (which range from 32-320
	// kbps). When using this encoding, `sample_rate_hertz` has to match the
	// sample rate of the file being used.
	RecognitionConfig_MP3 RecognitionConfig_AudioEncoding = 8
)

// RecognitionConfig_AudioEncoding_name maps each enum value to its proto name.
var RecognitionConfig_AudioEncoding_name = map[int32]string{
	0: "ENCODING_UNSPECIFIED",
	1: "LINEAR16",
	2: "FLAC",
	3: "MULAW",
	4: "AMR",
	5: "AMR_WB",
	6: "OGG_OPUS",
	7: "SPEEX_WITH_HEADER_BYTE",
	8: "MP3",
}

// RecognitionConfig_AudioEncoding_value maps each proto name back to its enum value.
var RecognitionConfig_AudioEncoding_value = map[string]int32{
	"ENCODING_UNSPECIFIED":   0,
	"LINEAR16":               1,
	"FLAC":                   2,
	"MULAW":                  3,
	"AMR":                    4,
	"AMR_WB":                 5,
	"OGG_OPUS":               6,
	"SPEEX_WITH_HEADER_BYTE": 7,
	"MP3":                    8,
}

// String returns the proto name of the enum value (via the name map above).
func (x RecognitionConfig_AudioEncoding) String() string {
	return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the index path
// of this enum within it (message 4, enum 0).
func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{4, 0}
}
| |
// Use case categories that the audio recognition request can be described
// by.
type RecognitionMetadata_InteractionType int32

const (
	// Use case is either unknown or is something other than one of the other
	// values below.
	RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED RecognitionMetadata_InteractionType = 0
	// Multiple people in a conversation or discussion. For example in a
	// meeting with two or more people actively participating. Typically
	// all the primary people speaking would be in the same room (if not,
	// see PHONE_CALL)
	RecognitionMetadata_DISCUSSION RecognitionMetadata_InteractionType = 1
	// One or more persons lecturing or presenting to others, mostly
	// uninterrupted.
	RecognitionMetadata_PRESENTATION RecognitionMetadata_InteractionType = 2
	// A phone-call or video-conference in which two or more people, who are
	// not in the same room, are actively participating.
	RecognitionMetadata_PHONE_CALL RecognitionMetadata_InteractionType = 3
	// A recorded message intended for another person to listen to.
	RecognitionMetadata_VOICEMAIL RecognitionMetadata_InteractionType = 4
	// Professionally produced audio (eg. TV Show, Podcast).
	RecognitionMetadata_PROFESSIONALLY_PRODUCED RecognitionMetadata_InteractionType = 5
	// Transcribe spoken questions and queries into text.
	RecognitionMetadata_VOICE_SEARCH RecognitionMetadata_InteractionType = 6
	// Transcribe voice commands, such as for controlling a device.
	RecognitionMetadata_VOICE_COMMAND RecognitionMetadata_InteractionType = 7
	// Transcribe speech to text to create a written document, such as a
	// text-message, email or report.
	RecognitionMetadata_DICTATION RecognitionMetadata_InteractionType = 8
)

// RecognitionMetadata_InteractionType_name maps each enum value to its proto name.
var RecognitionMetadata_InteractionType_name = map[int32]string{
	0: "INTERACTION_TYPE_UNSPECIFIED",
	1: "DISCUSSION",
	2: "PRESENTATION",
	3: "PHONE_CALL",
	4: "VOICEMAIL",
	5: "PROFESSIONALLY_PRODUCED",
	6: "VOICE_SEARCH",
	7: "VOICE_COMMAND",
	8: "DICTATION",
}

// RecognitionMetadata_InteractionType_value maps each proto name back to its enum value.
var RecognitionMetadata_InteractionType_value = map[string]int32{
	"INTERACTION_TYPE_UNSPECIFIED": 0,
	"DISCUSSION":                   1,
	"PRESENTATION":                 2,
	"PHONE_CALL":                   3,
	"VOICEMAIL":                    4,
	"PROFESSIONALLY_PRODUCED":      5,
	"VOICE_SEARCH":                 6,
	"VOICE_COMMAND":                7,
	"DICTATION":                    8,
}

// String returns the proto name of the enum value.
func (x RecognitionMetadata_InteractionType) String() string {
	return proto.EnumName(RecognitionMetadata_InteractionType_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the index path
// of this enum within it (message 6, enum 0).
func (RecognitionMetadata_InteractionType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6, 0}
}
| |
// Enumerates the types of capture settings describing an audio file.
type RecognitionMetadata_MicrophoneDistance int32

const (
	// Audio type is not known.
	RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED RecognitionMetadata_MicrophoneDistance = 0
	// The audio was captured from a closely placed microphone. Eg. phone,
	// dictaphone, or handheld microphone. Generally if the speaker is within
	// 1 meter of the microphone.
	RecognitionMetadata_NEARFIELD RecognitionMetadata_MicrophoneDistance = 1
	// The speaker is within 3 meters of the microphone.
	RecognitionMetadata_MIDFIELD RecognitionMetadata_MicrophoneDistance = 2
	// The speaker is more than 3 meters away from the microphone.
	RecognitionMetadata_FARFIELD RecognitionMetadata_MicrophoneDistance = 3
)

// RecognitionMetadata_MicrophoneDistance_name maps each enum value to its proto name.
var RecognitionMetadata_MicrophoneDistance_name = map[int32]string{
	0: "MICROPHONE_DISTANCE_UNSPECIFIED",
	1: "NEARFIELD",
	2: "MIDFIELD",
	3: "FARFIELD",
}

// RecognitionMetadata_MicrophoneDistance_value maps each proto name back to its enum value.
var RecognitionMetadata_MicrophoneDistance_value = map[string]int32{
	"MICROPHONE_DISTANCE_UNSPECIFIED": 0,
	"NEARFIELD":                       1,
	"MIDFIELD":                        2,
	"FARFIELD":                        3,
}

// String returns the proto name of the enum value.
func (x RecognitionMetadata_MicrophoneDistance) String() string {
	return proto.EnumName(RecognitionMetadata_MicrophoneDistance_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the index path
// of this enum within it (message 6, enum 1).
func (RecognitionMetadata_MicrophoneDistance) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6, 1}
}
| |
// The original media the speech was recorded on.
type RecognitionMetadata_OriginalMediaType int32

const (
	// Unknown original media type.
	RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED RecognitionMetadata_OriginalMediaType = 0
	// The speech data is an audio recording.
	RecognitionMetadata_AUDIO RecognitionMetadata_OriginalMediaType = 1
	// The speech data originally recorded on a video.
	RecognitionMetadata_VIDEO RecognitionMetadata_OriginalMediaType = 2
)

// RecognitionMetadata_OriginalMediaType_name maps each enum value to its proto name.
var RecognitionMetadata_OriginalMediaType_name = map[int32]string{
	0: "ORIGINAL_MEDIA_TYPE_UNSPECIFIED",
	1: "AUDIO",
	2: "VIDEO",
}

// RecognitionMetadata_OriginalMediaType_value maps each proto name back to its enum value.
var RecognitionMetadata_OriginalMediaType_value = map[string]int32{
	"ORIGINAL_MEDIA_TYPE_UNSPECIFIED": 0,
	"AUDIO":                           1,
	"VIDEO":                           2,
}

// String returns the proto name of the enum value.
func (x RecognitionMetadata_OriginalMediaType) String() string {
	return proto.EnumName(RecognitionMetadata_OriginalMediaType_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the index path
// of this enum within it (message 6, enum 2).
func (RecognitionMetadata_OriginalMediaType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6, 2}
}
| |
// The type of device the speech was recorded with.
type RecognitionMetadata_RecordingDeviceType int32

const (
	// The recording device is unknown.
	RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED RecognitionMetadata_RecordingDeviceType = 0
	// Speech was recorded on a smartphone.
	RecognitionMetadata_SMARTPHONE RecognitionMetadata_RecordingDeviceType = 1
	// Speech was recorded using a personal computer or tablet.
	RecognitionMetadata_PC RecognitionMetadata_RecordingDeviceType = 2
	// Speech was recorded over a phone line.
	RecognitionMetadata_PHONE_LINE RecognitionMetadata_RecordingDeviceType = 3
	// Speech was recorded in a vehicle.
	RecognitionMetadata_VEHICLE RecognitionMetadata_RecordingDeviceType = 4
	// Speech was recorded outdoors.
	RecognitionMetadata_OTHER_OUTDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 5
	// Speech was recorded indoors.
	RecognitionMetadata_OTHER_INDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 6
)

// RecognitionMetadata_RecordingDeviceType_name maps each enum value to its proto name.
var RecognitionMetadata_RecordingDeviceType_name = map[int32]string{
	0: "RECORDING_DEVICE_TYPE_UNSPECIFIED",
	1: "SMARTPHONE",
	2: "PC",
	3: "PHONE_LINE",
	4: "VEHICLE",
	5: "OTHER_OUTDOOR_DEVICE",
	6: "OTHER_INDOOR_DEVICE",
}

// RecognitionMetadata_RecordingDeviceType_value maps each proto name back to its enum value.
var RecognitionMetadata_RecordingDeviceType_value = map[string]int32{
	"RECORDING_DEVICE_TYPE_UNSPECIFIED": 0,
	"SMARTPHONE":                        1,
	"PC":                                2,
	"PHONE_LINE":                        3,
	"VEHICLE":                           4,
	"OTHER_OUTDOOR_DEVICE":              5,
	"OTHER_INDOOR_DEVICE":               6,
}

// String returns the proto name of the enum value.
func (x RecognitionMetadata_RecordingDeviceType) String() string {
	return proto.EnumName(RecognitionMetadata_RecordingDeviceType_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the index path
// of this enum within it (message 6, enum 3).
func (RecognitionMetadata_RecordingDeviceType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6, 3}
}
| |
// Indicates the type of speech event.
type StreamingRecognizeResponse_SpeechEventType int32

const (
	// No speech event specified.
	StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0
	// This event indicates that the server has detected the end of the user's
	// speech utterance and expects no additional speech. Therefore, the server
	// will not process additional audio (although it may subsequently return
	// additional results). The client should stop sending additional audio
	// data, half-close the gRPC connection, and wait for any additional results
	// until the server closes the gRPC connection. This event is only sent if
	// `single_utterance` was set to `true`, and is not used otherwise.
	StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1
)

// StreamingRecognizeResponse_SpeechEventType_name maps each enum value to its proto name.
var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{
	0: "SPEECH_EVENT_UNSPECIFIED",
	1: "END_OF_SINGLE_UTTERANCE",
}

// StreamingRecognizeResponse_SpeechEventType_value maps each proto name back to its enum value.
var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{
	"SPEECH_EVENT_UNSPECIFIED": 0,
	"END_OF_SINGLE_UTTERANCE":  1,
}

// String returns the proto name of the enum value.
func (x StreamingRecognizeResponse_SpeechEventType) String() string {
	return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor and the index path
// of this enum within it (message 12, enum 0).
func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{12, 0}
}
| |
// The top-level message sent by the client for the `Recognize` method.
type RecognizeRequest struct {
	// Required. Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// Required. The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

// Standard proto plumbing generated for every message type; all wire-format
// work is delegated to the cached message info below.
func (m *RecognizeRequest) Reset()         { *m = RecognizeRequest{} }
func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*RecognizeRequest) ProtoMessage()    {}
func (*RecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{0}
}

func (m *RecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeRequest.Unmarshal(m, b)
}
func (m *RecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeRequest.Marshal(b, m, deterministic)
}
func (m *RecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeRequest.Merge(m, src)
}
func (m *RecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_RecognizeRequest.Size(m)
}
func (m *RecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_RecognizeRequest caches marshalling metadata for RecognizeRequest.
var xxx_messageInfo_RecognizeRequest proto.InternalMessageInfo
| |
| func (m *RecognizeRequest) GetConfig() *RecognitionConfig { |
| if m != nil { |
| return m.Config |
| } |
| return nil |
| } |
| |
| func (m *RecognizeRequest) GetAudio() *RecognitionAudio { |
| if m != nil { |
| return m.Audio |
| } |
| return nil |
| } |
| |
// The top-level message sent by the client for the `LongRunningRecognize`
// method.
type LongRunningRecognizeRequest struct {
	// Required. Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// Required. The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

// Standard proto plumbing generated for every message type.
func (m *LongRunningRecognizeRequest) Reset()         { *m = LongRunningRecognizeRequest{} }
func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*LongRunningRecognizeRequest) ProtoMessage()    {}
func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{1}
}

func (m *LongRunningRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeRequest.Unmarshal(m, b)
}
func (m *LongRunningRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeRequest.Marshal(b, m, deterministic)
}
func (m *LongRunningRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeRequest.Merge(m, src)
}
func (m *LongRunningRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeRequest.Size(m)
}
func (m *LongRunningRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeRequest caches marshalling metadata for this message.
var xxx_messageInfo_LongRunningRecognizeRequest proto.InternalMessageInfo
| |
| func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig { |
| if m != nil { |
| return m.Config |
| } |
| return nil |
| } |
| |
| func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio { |
| if m != nil { |
| return m.Audio |
| } |
| return nil |
| } |
| |
// The top-level message sent by the client for the `StreamingRecognize` method.
// Multiple `StreamingRecognizeRequest` messages are sent. The first message
// must contain a `streaming_config` message and must not contain
// `audio_content`. All subsequent messages must contain `audio_content` and
// must not contain a `streaming_config` message.
type StreamingRecognizeRequest struct {
	// The streaming request, which is either a streaming config or audio content.
	//
	// Types that are valid to be assigned to StreamingRequest:
	//	*StreamingRecognizeRequest_StreamingConfig
	//	*StreamingRecognizeRequest_AudioContent
	StreamingRequest     isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
	XXX_NoUnkeyedLiteral struct{}                                     `json:"-"`
	XXX_unrecognized     []byte                                       `json:"-"`
	XXX_sizecache        int32                                        `json:"-"`
}

// Standard proto plumbing generated for every message type.
func (m *StreamingRecognizeRequest) Reset()         { *m = StreamingRecognizeRequest{} }
func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognizeRequest) ProtoMessage()    {}
func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{2}
}

func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeRequest.Unmarshal(m, b)
}
func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeRequest.Marshal(b, m, deterministic)
}
func (m *StreamingRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeRequest.Merge(m, src)
}
func (m *StreamingRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeRequest.Size(m)
}
func (m *StreamingRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognizeRequest caches marshalling metadata for this message.
var xxx_messageInfo_StreamingRecognizeRequest proto.InternalMessageInfo

// isStreamingRecognizeRequest_StreamingRequest is the sealed interface
// implemented by both oneof wrapper types below.
type isStreamingRecognizeRequest_StreamingRequest interface {
	isStreamingRecognizeRequest_StreamingRequest()
}

// StreamingRecognizeRequest_StreamingConfig wraps the `streaming_config` oneof case.
type StreamingRecognizeRequest_StreamingConfig struct {
	StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}

// StreamingRecognizeRequest_AudioContent wraps the `audio_content` oneof case.
type StreamingRecognizeRequest_AudioContent struct {
	AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
}

func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {}

func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {}
| |
| func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest { |
| if m != nil { |
| return m.StreamingRequest |
| } |
| return nil |
| } |
| |
| func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig { |
| if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok { |
| return x.StreamingConfig |
| } |
| return nil |
| } |
| |
| func (m *StreamingRecognizeRequest) GetAudioContent() []byte { |
| if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok { |
| return x.AudioContent |
| } |
| return nil |
| } |
| |
// XXX_OneofWrappers is for the internal use of the proto package.
// It enumerates the wrapper types of the `streaming_request` oneof so the
// runtime can marshal/unmarshal each case.
func (*StreamingRecognizeRequest) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*StreamingRecognizeRequest_StreamingConfig)(nil),
		(*StreamingRecognizeRequest_AudioContent)(nil),
	}
}
| |
// Provides information to the recognizer that specifies how to process the
// request.
type StreamingRecognitionConfig struct {
	// Required. Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// If `false` or omitted, the recognizer will perform continuous
	// recognition (continuing to wait for and process audio even if the user
	// pauses speaking) until the client closes the input stream (gRPC API) or
	// until the maximum time limit has been reached. May return multiple
	// `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
	//
	// If `true`, the recognizer will detect a single spoken utterance. When it
	// detects that the user has paused or stopped speaking, it will return an
	// `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
	// more than one `StreamingRecognitionResult` with the `is_final` flag set to
	// `true`.
	SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
	// If `true`, interim results (tentative hypotheses) may be
	// returned as they become available (these interim results are indicated with
	// the `is_final=false` flag).
	// If `false` or omitted, only `is_final=true` result(s) are returned.
	InterimResults       bool     `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard proto plumbing generated for every message type.
func (m *StreamingRecognitionConfig) Reset()         { *m = StreamingRecognitionConfig{} }
func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognitionConfig) ProtoMessage()    {}
func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{3}
}

func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionConfig.Unmarshal(m, b)
}
func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionConfig.Marshal(b, m, deterministic)
}
func (m *StreamingRecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionConfig.Merge(m, src)
}
func (m *StreamingRecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionConfig.Size(m)
}
func (m *StreamingRecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionConfig.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognitionConfig caches marshalling metadata for this message.
var xxx_messageInfo_StreamingRecognitionConfig proto.InternalMessageInfo
| |
| func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig { |
| if m != nil { |
| return m.Config |
| } |
| return nil |
| } |
| |
| func (m *StreamingRecognitionConfig) GetSingleUtterance() bool { |
| if m != nil { |
| return m.SingleUtterance |
| } |
| return false |
| } |
| |
| func (m *StreamingRecognitionConfig) GetInterimResults() bool { |
| if m != nil { |
| return m.InterimResults |
| } |
| return false |
| } |
| |
| // Provides information to the recognizer that specifies how to process the |
| // request. |
| type RecognitionConfig struct { |
| // Encoding of audio data sent in all `RecognitionAudio` messages. |
| // This field is optional for `FLAC` and `WAV` audio files and required |
| // for all other audio formats. For details, see |
| // [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. |
| Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"` |
| // Sample rate in Hertz of the audio data sent in all |
| // `RecognitionAudio` messages. Valid values are: 8000-48000. |
| // 16000 is optimal. For best results, set the sampling rate of the audio |
| // source to 16000 Hz. If that's not possible, use the native sample rate of |
| // the audio source (instead of re-sampling). |
| // This field is optional for FLAC and WAV audio files, but is |
| // required for all other audio formats. For details, see |
| // [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. |
| SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"` |
| // The number of channels in the input audio data. |
| // ONLY set this for MULTI-CHANNEL recognition. |
| // Valid values for LINEAR16 and FLAC are `1`-`8`. |
| // Valid values for OGG_OPUS are '1'-'254'. |
| // Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`. |
| // If `0` or omitted, defaults to one channel (mono). |
| // Note: We only recognize the first channel by default. |
| // To perform independent recognition on each channel set |
| // `enable_separate_recognition_per_channel` to 'true'. |
| AudioChannelCount int32 `protobuf:"varint,7,opt,name=audio_channel_count,json=audioChannelCount,proto3" json:"audio_channel_count,omitempty"` |
| // This needs to be set to `true` explicitly and `audio_channel_count` > 1 |
| // to get each channel recognized separately. The recognition result will |
| // contain a `channel_tag` field to state which channel that result belongs |
| // to. If this is not true, we will only recognize the first channel. The |
| // request is billed cumulatively for all channels recognized: |
| // `audio_channel_count` multiplied by the length of the audio. |
| EnableSeparateRecognitionPerChannel bool `protobuf:"varint,12,opt,name=enable_separate_recognition_per_channel,json=enableSeparateRecognitionPerChannel,proto3" json:"enable_separate_recognition_per_channel,omitempty"` |
| // Required. The language of the supplied audio as a |
| // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. |
| // Example: "en-US". |
| // See [Language |
| // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list |
| // of the currently supported language codes. |
| LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"` |
| // A list of up to 3 additional |
| // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags, |
| // listing possible alternative languages of the supplied audio. |
| // See [Language |
| // Support](https://cloud.google.com/speech-to-text/docs/languages) for a list |
| // of the currently supported language codes. If alternative languages are |
| // listed, recognition result will contain recognition in the most likely |
| // language detected including the main language_code. The recognition result |
| // will include the language tag of the language detected in the audio. Note: |
| // This feature is only supported for Voice Command and Voice Search use cases |
| // and performance may vary for other use cases (e.g., phone call |
| // transcription). |
| AlternativeLanguageCodes []string `protobuf:"bytes,18,rep,name=alternative_language_codes,json=alternativeLanguageCodes,proto3" json:"alternative_language_codes,omitempty"` |
| // Maximum number of recognition hypotheses to be returned. |
| // Specifically, the maximum number of `SpeechRecognitionAlternative` messages |
| // within each `SpeechRecognitionResult`. |
| // The server may return fewer than `max_alternatives`. |
| // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of |
| // one. If omitted, will return a maximum of one. |
| MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"` |
| // If set to `true`, the server will attempt to filter out |
| // profanities, replacing all but the initial character in each filtered word |
| // with asterisks, e.g. "f***". If set to `false` or omitted, profanities |
| // won't be filtered out. |
| ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"` |
| // Speech adaptation configuration improves the accuracy of speech |
| // recognition. When speech adaptation is set it supersedes the |
| // `speech_contexts` field. For more information, see the [speech |
| // adaptation](https://cloud.google.com/speech-to-text/docs/context-strength) |
| // documentation. |
| Adaptation *SpeechAdaptation `protobuf:"bytes,20,opt,name=adaptation,proto3" json:"adaptation,omitempty"` |
| // Array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. |
| // A means to provide context to assist the speech recognition. For more |
| // information, see |
| // [speech |
| // adaptation](https://cloud.google.com/speech-to-text/docs/context-strength). |
| SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"` |
| // If `true`, the top result includes a list of words and |
| // the start and end time offsets (timestamps) for those words. If |
| // `false`, no word-level time offset information is returned. The default is |
| // `false`. |
| EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets,proto3" json:"enable_word_time_offsets,omitempty"` |
| // If `true`, the top result includes a list of words and the |
| // confidence for those words. If `false`, no word-level confidence |
| // information is returned. The default is `false`. |
| EnableWordConfidence bool `protobuf:"varint,15,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"` |
| // If 'true', adds punctuation to recognition result hypotheses. |
| // This feature is only available in select languages. Setting this for |
| // requests in other languages has no effect at all. |
| // The default 'false' value does not add punctuation to result hypotheses. |
| EnableAutomaticPunctuation bool `protobuf:"varint,11,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"` |
| // If 'true', enables speaker detection for each recognized word in |
| // the top alternative of the recognition result using a speaker_tag provided |
| // in the WordInfo. |
| // Note: Use diarization_config instead. |
| EnableSpeakerDiarization bool `protobuf:"varint,16,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"` // Deprecated: Do not use. |
| // If set, specifies the estimated number of speakers in the conversation. |
| // Defaults to '2'. Ignored unless enable_speaker_diarization is set to true. |
| // Note: Use diarization_config instead. |
| DiarizationSpeakerCount int32 `protobuf:"varint,17,opt,name=diarization_speaker_count,json=diarizationSpeakerCount,proto3" json:"diarization_speaker_count,omitempty"` // Deprecated: Do not use. |
| // Config to enable speaker diarization and set additional |
| // parameters to make diarization better suited for your application. |
| // Note: When this is enabled, we send all the words from the beginning of the |
| // audio for the top alternative in every consecutive STREAMING responses. |
| // This is done in order to improve our speaker tags as our models learn to |
| // identify the speakers in the conversation over time. |
| // For non-streaming requests, the diarization results will be provided only |
| // in the top alternative of the FINAL SpeechRecognitionResult. |
| DiarizationConfig *SpeakerDiarizationConfig `protobuf:"bytes,19,opt,name=diarization_config,json=diarizationConfig,proto3" json:"diarization_config,omitempty"` |
| // Metadata regarding this request. |
| Metadata *RecognitionMetadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"` |
| // Which model to select for the given request. Select the model |
| // best suited to your domain to get best results. If a model is not |
| // explicitly specified, then we auto-select a model based on the parameters |
| // in the RecognitionConfig. |
| // <table> |
| // <tr> |
| // <td><b>Model</b></td> |
| // <td><b>Description</b></td> |
| // </tr> |
| // <tr> |
| // <td><code>command_and_search</code></td> |
| // <td>Best for short queries such as voice commands or voice search.</td> |
| // </tr> |
| // <tr> |
| // <td><code>phone_call</code></td> |
| // <td>Best for audio that originated from a phone call (typically |
| // recorded at an 8khz sampling rate).</td> |
| // </tr> |
| // <tr> |
| // <td><code>video</code></td> |
// <td>Best for audio that originated from video or includes multiple
| // speakers. Ideally the audio is recorded at a 16khz or greater |
| // sampling rate. This is a premium model that costs more than the |
| // standard rate.</td> |
| // </tr> |
| // <tr> |
| // <td><code>default</code></td> |
| // <td>Best for audio that is not one of the specific audio models. |
| // For example, long-form audio. Ideally the audio is high-fidelity, |
| // recorded at a 16khz or greater sampling rate.</td> |
| // </tr> |
| // </table> |
| Model string `protobuf:"bytes,13,opt,name=model,proto3" json:"model,omitempty"` |
| // Set to true to use an enhanced model for speech recognition. |
| // If `use_enhanced` is set to true and the `model` field is not set, then |
| // an appropriate enhanced model is chosen if an enhanced model exists for |
| // the audio. |
| // |
| // If `use_enhanced` is true and an enhanced version of the specified model |
| // does not exist, then the speech is recognized using the standard version |
| // of the specified model. |
| UseEnhanced bool `protobuf:"varint,14,opt,name=use_enhanced,json=useEnhanced,proto3" json:"use_enhanced,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
// Reset restores the message to its zero value.
func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} }
// String renders the message in the proto compact text format.
func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks RecognitionConfig as a protobuf message.
func (*RecognitionConfig) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the index path of this
// message within it.
func (*RecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{4}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionConfig.Unmarshal(m, b)
}
// XXX_Marshal encodes m in wire format, appending to b.
func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionConfig.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into m.
func (m *RecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionConfig.Merge(m, src)
}
// XXX_Size reports the size of the wire-format encoding of m.
func (m *RecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_RecognitionConfig.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained in m.
func (m *RecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionConfig.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionConfig caches marshaling metadata for the type.
var xxx_messageInfo_RecognitionConfig proto.InternalMessageInfo
| |
| func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding { |
| if m != nil { |
| return m.Encoding |
| } |
| return RecognitionConfig_ENCODING_UNSPECIFIED |
| } |
| |
| func (m *RecognitionConfig) GetSampleRateHertz() int32 { |
| if m != nil { |
| return m.SampleRateHertz |
| } |
| return 0 |
| } |
| |
| func (m *RecognitionConfig) GetAudioChannelCount() int32 { |
| if m != nil { |
| return m.AudioChannelCount |
| } |
| return 0 |
| } |
| |
| func (m *RecognitionConfig) GetEnableSeparateRecognitionPerChannel() bool { |
| if m != nil { |
| return m.EnableSeparateRecognitionPerChannel |
| } |
| return false |
| } |
| |
| func (m *RecognitionConfig) GetLanguageCode() string { |
| if m != nil { |
| return m.LanguageCode |
| } |
| return "" |
| } |
| |
| func (m *RecognitionConfig) GetAlternativeLanguageCodes() []string { |
| if m != nil { |
| return m.AlternativeLanguageCodes |
| } |
| return nil |
| } |
| |
| func (m *RecognitionConfig) GetMaxAlternatives() int32 { |
| if m != nil { |
| return m.MaxAlternatives |
| } |
| return 0 |
| } |
| |
| func (m *RecognitionConfig) GetProfanityFilter() bool { |
| if m != nil { |
| return m.ProfanityFilter |
| } |
| return false |
| } |
| |
| func (m *RecognitionConfig) GetAdaptation() *SpeechAdaptation { |
| if m != nil { |
| return m.Adaptation |
| } |
| return nil |
| } |
| |
| func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext { |
| if m != nil { |
| return m.SpeechContexts |
| } |
| return nil |
| } |
| |
| func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool { |
| if m != nil { |
| return m.EnableWordTimeOffsets |
| } |
| return false |
| } |
| |
| func (m *RecognitionConfig) GetEnableWordConfidence() bool { |
| if m != nil { |
| return m.EnableWordConfidence |
| } |
| return false |
| } |
| |
| func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool { |
| if m != nil { |
| return m.EnableAutomaticPunctuation |
| } |
| return false |
| } |
| |
| // Deprecated: Do not use. |
| func (m *RecognitionConfig) GetEnableSpeakerDiarization() bool { |
| if m != nil { |
| return m.EnableSpeakerDiarization |
| } |
| return false |
| } |
| |
| // Deprecated: Do not use. |
| func (m *RecognitionConfig) GetDiarizationSpeakerCount() int32 { |
| if m != nil { |
| return m.DiarizationSpeakerCount |
| } |
| return 0 |
| } |
| |
| func (m *RecognitionConfig) GetDiarizationConfig() *SpeakerDiarizationConfig { |
| if m != nil { |
| return m.DiarizationConfig |
| } |
| return nil |
| } |
| |
| func (m *RecognitionConfig) GetMetadata() *RecognitionMetadata { |
| if m != nil { |
| return m.Metadata |
| } |
| return nil |
| } |
| |
| func (m *RecognitionConfig) GetModel() string { |
| if m != nil { |
| return m.Model |
| } |
| return "" |
| } |
| |
| func (m *RecognitionConfig) GetUseEnhanced() bool { |
| if m != nil { |
| return m.UseEnhanced |
| } |
| return false |
| } |
| |
// Config to enable speaker diarization.
type SpeakerDiarizationConfig struct {
	// If 'true', enables speaker detection for each recognized word in
	// the top alternative of the recognition result using a speaker_tag provided
	// in the WordInfo.
	EnableSpeakerDiarization bool `protobuf:"varint,1,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"`
	// Minimum number of speakers in the conversation. This range gives you more
	// flexibility by allowing the system to automatically determine the correct
	// number of speakers. If not set, the default value is 2.
	MinSpeakerCount int32 `protobuf:"varint,2,opt,name=min_speaker_count,json=minSpeakerCount,proto3" json:"min_speaker_count,omitempty"`
	// Maximum number of speakers in the conversation. This range gives you more
	// flexibility by allowing the system to automatically determine the correct
	// number of speakers. If not set, the default value is 6.
	MaxSpeakerCount int32 `protobuf:"varint,3,opt,name=max_speaker_count,json=maxSpeakerCount,proto3" json:"max_speaker_count,omitempty"`
	// Output only. Unused.
	SpeakerTag int32 `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"` // Deprecated: Do not use.
	// Internal fields used by the protobuf runtime; do not access.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset restores the message to its zero value.
func (m *SpeakerDiarizationConfig) Reset() { *m = SpeakerDiarizationConfig{} }
// String renders the message in the proto compact text format.
func (m *SpeakerDiarizationConfig) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks SpeakerDiarizationConfig as a protobuf message.
func (*SpeakerDiarizationConfig) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the index path of this
// message within it.
func (*SpeakerDiarizationConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{5}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *SpeakerDiarizationConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeakerDiarizationConfig.Unmarshal(m, b)
}
// XXX_Marshal encodes m in wire format, appending to b.
func (m *SpeakerDiarizationConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeakerDiarizationConfig.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into m.
func (m *SpeakerDiarizationConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeakerDiarizationConfig.Merge(m, src)
}
// XXX_Size reports the size of the wire-format encoding of m.
func (m *SpeakerDiarizationConfig) XXX_Size() int {
	return xxx_messageInfo_SpeakerDiarizationConfig.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained in m.
func (m *SpeakerDiarizationConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeakerDiarizationConfig.DiscardUnknown(m)
}

// xxx_messageInfo_SpeakerDiarizationConfig caches marshaling metadata for the type.
var xxx_messageInfo_SpeakerDiarizationConfig proto.InternalMessageInfo
| |
| func (m *SpeakerDiarizationConfig) GetEnableSpeakerDiarization() bool { |
| if m != nil { |
| return m.EnableSpeakerDiarization |
| } |
| return false |
| } |
| |
| func (m *SpeakerDiarizationConfig) GetMinSpeakerCount() int32 { |
| if m != nil { |
| return m.MinSpeakerCount |
| } |
| return 0 |
| } |
| |
| func (m *SpeakerDiarizationConfig) GetMaxSpeakerCount() int32 { |
| if m != nil { |
| return m.MaxSpeakerCount |
| } |
| return 0 |
| } |
| |
| // Deprecated: Do not use. |
| func (m *SpeakerDiarizationConfig) GetSpeakerTag() int32 { |
| if m != nil { |
| return m.SpeakerTag |
| } |
| return 0 |
| } |
| |
// Description of audio data to be recognized.
type RecognitionMetadata struct {
	// The use case most closely describing the audio content to be recognized.
	InteractionType RecognitionMetadata_InteractionType `protobuf:"varint,1,opt,name=interaction_type,json=interactionType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType" json:"interaction_type,omitempty"`
	// The industry vertical to which this speech recognition request most
	// closely applies. This is most indicative of the topics contained
	// in the audio. Use the 6-digit NAICS code to identify the industry
	// vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio uint32 `protobuf:"varint,3,opt,name=industry_naics_code_of_audio,json=industryNaicsCodeOfAudio,proto3" json:"industry_naics_code_of_audio,omitempty"`
	// The audio type that most closely describes the audio being recognized.
	MicrophoneDistance RecognitionMetadata_MicrophoneDistance `protobuf:"varint,4,opt,name=microphone_distance,json=microphoneDistance,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance" json:"microphone_distance,omitempty"`
	// The original media the speech was recorded on.
	OriginalMediaType RecognitionMetadata_OriginalMediaType `protobuf:"varint,5,opt,name=original_media_type,json=originalMediaType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType" json:"original_media_type,omitempty"`
	// The type of device the speech was recorded with.
	RecordingDeviceType RecognitionMetadata_RecordingDeviceType `protobuf:"varint,6,opt,name=recording_device_type,json=recordingDeviceType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType" json:"recording_device_type,omitempty"`
	// The device used to make the recording. Examples 'Nexus 5X' or
	// 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `protobuf:"bytes,7,opt,name=recording_device_name,json=recordingDeviceName,proto3" json:"recording_device_name,omitempty"`
	// Mime type of the original audio file. For example `audio/m4a`,
	// `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
	// A list of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `protobuf:"bytes,8,opt,name=original_mime_type,json=originalMimeType,proto3" json:"original_mime_type,omitempty"`
	// Obfuscated (privacy-protected) ID of the user, to identify number of
	// unique users using the service.
	ObfuscatedId int64 `protobuf:"varint,9,opt,name=obfuscated_id,json=obfuscatedId,proto3" json:"obfuscated_id,omitempty"` // Deprecated: Do not use.
	// Description of the content. Eg. "Recordings of federal supreme court
	// hearings from 2012".
	AudioTopic string `protobuf:"bytes,10,opt,name=audio_topic,json=audioTopic,proto3" json:"audio_topic,omitempty"`
	// Internal fields used by the protobuf runtime; do not access.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset restores the message to its zero value.
func (m *RecognitionMetadata) Reset() { *m = RecognitionMetadata{} }
// String renders the message in the proto compact text format.
func (m *RecognitionMetadata) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks RecognitionMetadata as a protobuf message.
func (*RecognitionMetadata) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the index path of this
// message within it.
func (*RecognitionMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{6}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *RecognitionMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionMetadata.Unmarshal(m, b)
}
// XXX_Marshal encodes m in wire format, appending to b.
func (m *RecognitionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionMetadata.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into m.
func (m *RecognitionMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionMetadata.Merge(m, src)
}
// XXX_Size reports the size of the wire-format encoding of m.
func (m *RecognitionMetadata) XXX_Size() int {
	return xxx_messageInfo_RecognitionMetadata.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained in m.
func (m *RecognitionMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionMetadata.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionMetadata caches marshaling metadata for the type.
var xxx_messageInfo_RecognitionMetadata proto.InternalMessageInfo
| |
| func (m *RecognitionMetadata) GetInteractionType() RecognitionMetadata_InteractionType { |
| if m != nil { |
| return m.InteractionType |
| } |
| return RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED |
| } |
| |
| func (m *RecognitionMetadata) GetIndustryNaicsCodeOfAudio() uint32 { |
| if m != nil { |
| return m.IndustryNaicsCodeOfAudio |
| } |
| return 0 |
| } |
| |
| func (m *RecognitionMetadata) GetMicrophoneDistance() RecognitionMetadata_MicrophoneDistance { |
| if m != nil { |
| return m.MicrophoneDistance |
| } |
| return RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED |
| } |
| |
| func (m *RecognitionMetadata) GetOriginalMediaType() RecognitionMetadata_OriginalMediaType { |
| if m != nil { |
| return m.OriginalMediaType |
| } |
| return RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED |
| } |
| |
| func (m *RecognitionMetadata) GetRecordingDeviceType() RecognitionMetadata_RecordingDeviceType { |
| if m != nil { |
| return m.RecordingDeviceType |
| } |
| return RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED |
| } |
| |
| func (m *RecognitionMetadata) GetRecordingDeviceName() string { |
| if m != nil { |
| return m.RecordingDeviceName |
| } |
| return "" |
| } |
| |
| func (m *RecognitionMetadata) GetOriginalMimeType() string { |
| if m != nil { |
| return m.OriginalMimeType |
| } |
| return "" |
| } |
| |
| // Deprecated: Do not use. |
| func (m *RecognitionMetadata) GetObfuscatedId() int64 { |
| if m != nil { |
| return m.ObfuscatedId |
| } |
| return 0 |
| } |
| |
| func (m *RecognitionMetadata) GetAudioTopic() string { |
| if m != nil { |
| return m.AudioTopic |
| } |
| return "" |
| } |
| |
// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
type SpeechContext struct {
	// A list of strings containing words and phrases "hints" so that
	// the speech recognition is more likely to recognize them. This can be used
	// to improve the accuracy for specific words and phrases, for example, if
	// specific commands are typically spoken by the user. This can also be used
	// to add additional words to the vocabulary of the recognizer. See
	// [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
	//
	// List items can also be set to classes for groups of words that represent
	// common concepts that occur in natural language. For example, rather than
	// providing phrase hints for every month of the year, using the $MONTH class
	// improves the likelihood of correctly transcribing audio that includes
	// months.
	Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// Hint Boost. Positive value will increase the probability that a specific
	// phrase will be recognized over other similar sounding phrases. The higher
	// the boost, the higher the chance of false positive recognition as well.
	// Negative boost values would correspond to anti-biasing. Anti-biasing is not
	// enabled, so negative boost will simply be ignored. Though `boost` can
	// accept a wide range of positive values, most use cases are best served with
	// values between 0 and 20. We recommend using a binary search approach to
	// finding the optimal value for your use case.
	Boost float32 `protobuf:"fixed32,4,opt,name=boost,proto3" json:"boost,omitempty"`
	// Internal fields used by the protobuf runtime; do not access.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset restores the message to its zero value.
func (m *SpeechContext) Reset() { *m = SpeechContext{} }
// String renders the message in the proto compact text format.
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks SpeechContext as a protobuf message.
func (*SpeechContext) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the index path of this
// message within it.
func (*SpeechContext) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{7}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
}
// XXX_Marshal encodes m in wire format, appending to b.
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into m.
func (m *SpeechContext) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechContext.Merge(m, src)
}
// XXX_Size reports the size of the wire-format encoding of m.
func (m *SpeechContext) XXX_Size() int {
	return xxx_messageInfo_SpeechContext.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained in m.
func (m *SpeechContext) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechContext.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechContext caches marshaling metadata for the type.
var xxx_messageInfo_SpeechContext proto.InternalMessageInfo
| |
| func (m *SpeechContext) GetPhrases() []string { |
| if m != nil { |
| return m.Phrases |
| } |
| return nil |
| } |
| |
| func (m *SpeechContext) GetBoost() float32 { |
| if m != nil { |
| return m.Boost |
| } |
| return 0 |
| } |
| |
// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
// See [content limits](https://cloud.google.com/speech-to-text/quotas#content).
type RecognitionAudio struct {
	// The audio source, which is either inline content or a Google Cloud
	// Storage uri.
	//
	// Types that are valid to be assigned to AudioSource:
	//	*RecognitionAudio_Content
	//	*RecognitionAudio_Uri
	AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
	// Internal fields used by the protobuf runtime; do not access.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset restores the message to its zero value.
func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} }
// String renders the message in the proto compact text format.
func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks RecognitionAudio as a protobuf message.
func (*RecognitionAudio) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the index path of this
// message within it.
func (*RecognitionAudio) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{8}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionAudio.Unmarshal(m, b)
}
// XXX_Marshal encodes m in wire format, appending to b.
func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionAudio.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into m.
func (m *RecognitionAudio) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionAudio.Merge(m, src)
}
// XXX_Size reports the size of the wire-format encoding of m.
func (m *RecognitionAudio) XXX_Size() int {
	return xxx_messageInfo_RecognitionAudio.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained in m.
func (m *RecognitionAudio) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionAudio.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionAudio caches marshaling metadata for the type.
var xxx_messageInfo_RecognitionAudio proto.InternalMessageInfo
| |
// isRecognitionAudio_AudioSource is the interface satisfied by the wrapper
// types of the audio_source oneof.
type isRecognitionAudio_AudioSource interface {
	isRecognitionAudio_AudioSource()
}

// RecognitionAudio_Content is the oneof wrapper carrying inline audio bytes.
type RecognitionAudio_Content struct {
	Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
}

// RecognitionAudio_Uri is the oneof wrapper carrying an audio source URI.
type RecognitionAudio_Uri struct {
	Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
}

func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {}

func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {}

// GetAudioSource returns the oneof wrapper currently set on m; it is nil for
// a nil receiver or when no variant has been assigned.
func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource {
	if m != nil {
		return m.AudioSource
	}
	return nil
}
| |
| func (m *RecognitionAudio) GetContent() []byte { |
| if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok { |
| return x.Content |
| } |
| return nil |
| } |
| |
| func (m *RecognitionAudio) GetUri() string { |
| if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok { |
| return x.Uri |
| } |
| return "" |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*RecognitionAudio) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*RecognitionAudio_Content)(nil), |
| (*RecognitionAudio_Uri)(nil), |
| } |
| } |
| |
// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
type RecognizeResponse struct {
	// Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Internal fields used by the protobuf runtime; do not access.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset restores the message to its zero value.
func (m *RecognizeResponse) Reset() { *m = RecognizeResponse{} }
// String renders the message in the proto compact text format.
func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks RecognizeResponse as a protobuf message.
func (*RecognizeResponse) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the index path of this
// message within it.
func (*RecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{9}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *RecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeResponse.Unmarshal(m, b)
}
// XXX_Marshal encodes m in wire format, appending to b.
func (m *RecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeResponse.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into m.
func (m *RecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeResponse.Merge(m, src)
}
// XXX_Size reports the size of the wire-format encoding of m.
func (m *RecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_RecognizeResponse.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained in m.
func (m *RecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_RecognizeResponse caches marshaling metadata for the type.
var xxx_messageInfo_RecognizeResponse proto.InternalMessageInfo
| |
| func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult { |
| if m != nil { |
| return m.Results |
| } |
| return nil |
| } |
| |
// The only message returned to the client by the `LongRunningRecognize` method.
// It contains the result as zero or more sequential `SpeechRecognitionResult`
// messages. It is included in the `result.response` field of the `Operation`
// returned by the `GetOperation` call of the `google::longrunning::Operations`
// service.
type LongRunningRecognizeResponse struct {
	// Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Internal fields used by the protobuf runtime; do not access.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset restores the message to its zero value.
func (m *LongRunningRecognizeResponse) Reset() { *m = LongRunningRecognizeResponse{} }
// String renders the message in the proto compact text format.
func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks LongRunningRecognizeResponse as a protobuf message.
func (*LongRunningRecognizeResponse) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the index path of this
// message within it.
func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{10}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *LongRunningRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeResponse.Unmarshal(m, b)
}
// XXX_Marshal encodes m in wire format, appending to b.
func (m *LongRunningRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeResponse.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into m.
func (m *LongRunningRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeResponse.Merge(m, src)
}
// XXX_Size reports the size of the wire-format encoding of m.
func (m *LongRunningRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeResponse.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained in m.
func (m *LongRunningRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeResponse caches marshaling metadata for the type.
var xxx_messageInfo_LongRunningRecognizeResponse proto.InternalMessageInfo
| |
| func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult { |
| if m != nil { |
| return m.Results |
| } |
| return nil |
| } |
| |
// Describes the progress of a long-running `LongRunningRecognize` call. It is
// included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type LongRunningRecognizeMetadata struct {
	// Approximate percentage of audio processed thus far. Guaranteed to be 100
	// when the audio is fully processed and the results are available.
	ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
	// Time when the request was received.
	StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time of the most recent processing update.
	LastUpdateTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
	// The URI of the audio file being transcribed. Empty if the audio was sent
	// as byte content.
	Uri string `protobuf:"bytes,4,opt,name=uri,proto3" json:"uri,omitempty"`
	// Internal fields used by the protobuf runtime; do not access.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset restores the message to its zero value.
func (m *LongRunningRecognizeMetadata) Reset() { *m = LongRunningRecognizeMetadata{} }
// String renders the message in the proto compact text format.
func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks LongRunningRecognizeMetadata as a protobuf message.
func (*LongRunningRecognizeMetadata) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the index path of this
// message within it.
func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{11}
}

// XXX_Unmarshal decodes the wire-format bytes b into m.
func (m *LongRunningRecognizeMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Unmarshal(m, b)
}
// XXX_Marshal encodes m in wire format, appending to b.
func (m *LongRunningRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into m.
func (m *LongRunningRecognizeMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeMetadata.Merge(m, src)
}
// XXX_Size reports the size of the wire-format encoding of m.
func (m *LongRunningRecognizeMetadata) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained in m.
func (m *LongRunningRecognizeMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeMetadata.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeMetadata caches marshaling metadata for the type.
var xxx_messageInfo_LongRunningRecognizeMetadata proto.InternalMessageInfo
| |
| func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 { |
| if m != nil { |
| return m.ProgressPercent |
| } |
| return 0 |
| } |
| |
| func (m *LongRunningRecognizeMetadata) GetStartTime() *timestamp.Timestamp { |
| if m != nil { |
| return m.StartTime |
| } |
| return nil |
| } |
| |
| func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp { |
| if m != nil { |
| return m.LastUpdateTime |
| } |
| return nil |
| } |
| |
| func (m *LongRunningRecognizeMetadata) GetUri() string { |
| if m != nil { |
| return m.Uri |
| } |
| return "" |
| } |
| |
// `StreamingRecognizeResponse` is the only message returned to the client by
// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
// messages are streamed back to the client. If there is no recognizable
// audio, and `single_utterance` is set to false, then no messages are streamed
// back to the client.
//
// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
// be returned while processing audio:
//
// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
//
// 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
//
// 3. results { alternatives { transcript: "to be" } stability: 0.9 }
// results { alternatives { transcript: " or not to be" } stability: 0.01 }
//
// 4. results { alternatives { transcript: "to be or not to be"
// confidence: 0.92 }
// alternatives { transcript: "to bee or not to bee" }
// is_final: true }
//
// 5. results { alternatives { transcript: " that's" } stability: 0.01 }
//
// 6. results { alternatives { transcript: " that is" } stability: 0.9 }
// results { alternatives { transcript: " the question" } stability: 0.01 }
//
// 7. results { alternatives { transcript: " that is the question"
// confidence: 0.98 }
// alternatives { transcript: " that was the question" }
// is_final: true }
//
// Notes:
//
// - Only two of the above responses #4 and #7 contain final results; they are
// indicated by `is_final: true`. Concatenating these together generates the
// full transcript: "to be or not to be that is the question".
//
// - The others contain interim `results`. #3 and #6 contain two interim
// `results`: the first portion has a high stability and is less likely to
// change; the second portion has a low stability and is very likely to
// change. A UI designer might choose to show only high stability `results`.
//
// - The specific `stability` and `confidence` values shown above are only for
// illustrative purposes. Actual values may vary.
//
// - In each response, only one of these fields will be set:
// `error`,
// `speech_event_type`, or
// one or more (repeated) `results`.
type StreamingRecognizeResponse struct {
	// If set, returns a [google.rpc.Status][google.rpc.Status] message that
	// specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// This repeated list contains zero or more results that
	// correspond to consecutive portions of the audio currently being processed.
	// It contains zero or one `is_final=true` result (the newly settled portion),
	// followed by zero or more `is_final=false` results (the interim results).
	Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Indicates the type of speech event.
	SpeechEventType StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,proto3,enum=google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"`
	// Internal bookkeeping fields used by the protobuf runtime; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset sets the message to its zero value.
func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} }

// String returns the message rendered in the compact proto text format.
func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*StreamingRecognizeResponse) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto bytes and this
// message's index path within the descriptor.
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{12}
}

// XXX_Unmarshal parses the wire-format bytes b into m via the proto runtime.
func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeResponse.Unmarshal(m, b)
}

// XXX_Marshal delegates wire-format marshaling of m to the proto runtime;
// b is the destination buffer.
func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeResponse.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *StreamingRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeResponse.Merge(m, src)
}

// XXX_Size returns the size in bytes of the wire-format encoding of m.
func (m *StreamingRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeResponse.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained in m.
func (m *StreamingRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognizeResponse caches marshaling state for
// StreamingRecognizeResponse in the proto runtime.
var xxx_messageInfo_StreamingRecognizeResponse proto.InternalMessageInfo
| |
| func (m *StreamingRecognizeResponse) GetError() *status.Status { |
| if m != nil { |
| return m.Error |
| } |
| return nil |
| } |
| |
| func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult { |
| if m != nil { |
| return m.Results |
| } |
| return nil |
| } |
| |
| func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType { |
| if m != nil { |
| return m.SpeechEventType |
| } |
| return StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED |
| } |
| |
// A streaming speech recognition result corresponding to a portion of the audio
// that is currently being processed.
type StreamingRecognitionResult struct {
	// May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// If `false`, this `StreamingRecognitionResult` represents an
	// interim result that may change. If `true`, this is the final time the
	// speech service will return this particular `StreamingRecognitionResult`,
	// the recognizer will not return any further hypotheses for this portion of
	// the transcript and corresponding audio.
	IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
	// An estimate of the likelihood that the recognizer will not
	// change its guess about this interim result. Values range from 0.0
	// (completely unstable) to 1.0 (completely stable).
	// This field is only provided for interim results (`is_final=false`).
	// The default of 0.0 is a sentinel value indicating `stability` was not set.
	Stability float32 `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
	// Time offset of the end of this result relative to the
	// beginning of the audio.
	ResultEndTime *duration.Duration `protobuf:"bytes,4,opt,name=result_end_time,json=resultEndTime,proto3" json:"result_end_time,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,5,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
	// language tag of the language in this result. This language code was
	// detected to have the most likelihood of being spoken in the audio.
	LanguageCode string `protobuf:"bytes,6,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// Internal bookkeeping fields used by the protobuf runtime; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset sets the message to its zero value.
func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} }

// String returns the message rendered in the compact proto text format.
func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*StreamingRecognitionResult) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto bytes and this
// message's index path within the descriptor.
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{13}
}

// XXX_Unmarshal parses the wire-format bytes b into m via the proto runtime.
func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionResult.Unmarshal(m, b)
}

// XXX_Marshal delegates wire-format marshaling of m to the proto runtime;
// b is the destination buffer.
func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionResult.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *StreamingRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionResult.Merge(m, src)
}

// XXX_Size returns the size in bytes of the wire-format encoding of m.
func (m *StreamingRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionResult.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained in m.
func (m *StreamingRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionResult.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognitionResult caches marshaling state for
// StreamingRecognitionResult in the proto runtime.
var xxx_messageInfo_StreamingRecognitionResult proto.InternalMessageInfo
| |
| func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { |
| if m != nil { |
| return m.Alternatives |
| } |
| return nil |
| } |
| |
| func (m *StreamingRecognitionResult) GetIsFinal() bool { |
| if m != nil { |
| return m.IsFinal |
| } |
| return false |
| } |
| |
| func (m *StreamingRecognitionResult) GetStability() float32 { |
| if m != nil { |
| return m.Stability |
| } |
| return 0 |
| } |
| |
| func (m *StreamingRecognitionResult) GetResultEndTime() *duration.Duration { |
| if m != nil { |
| return m.ResultEndTime |
| } |
| return nil |
| } |
| |
| func (m *StreamingRecognitionResult) GetChannelTag() int32 { |
| if m != nil { |
| return m.ChannelTag |
| } |
| return 0 |
| } |
| |
| func (m *StreamingRecognitionResult) GetLanguageCode() string { |
| if m != nil { |
| return m.LanguageCode |
| } |
| return "" |
| } |
| |
// A speech recognition result corresponding to a portion of the audio.
type SpeechRecognitionResult struct {
	// May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,2,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
	// language tag of the language in this result. This language code was
	// detected to have the most likelihood of being spoken in the audio.
	LanguageCode string `protobuf:"bytes,5,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// Internal bookkeeping fields used by the protobuf runtime; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset sets the message to its zero value.
func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} }

// String returns the message rendered in the compact proto text format.
func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*SpeechRecognitionResult) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto bytes and this
// message's index path within the descriptor.
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{14}
}

// XXX_Unmarshal parses the wire-format bytes b into m via the proto runtime.
func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionResult.Unmarshal(m, b)
}

// XXX_Marshal delegates wire-format marshaling of m to the proto runtime;
// b is the destination buffer.
func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionResult.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *SpeechRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionResult.Merge(m, src)
}

// XXX_Size returns the size in bytes of the wire-format encoding of m.
func (m *SpeechRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionResult.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained in m.
func (m *SpeechRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionResult.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechRecognitionResult caches marshaling state for
// SpeechRecognitionResult in the proto runtime.
var xxx_messageInfo_SpeechRecognitionResult proto.InternalMessageInfo
| |
| func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { |
| if m != nil { |
| return m.Alternatives |
| } |
| return nil |
| } |
| |
| func (m *SpeechRecognitionResult) GetChannelTag() int32 { |
| if m != nil { |
| return m.ChannelTag |
| } |
| return 0 |
| } |
| |
| func (m *SpeechRecognitionResult) GetLanguageCode() string { |
| if m != nil { |
| return m.LanguageCode |
| } |
| return "" |
| } |
| |
// Alternative hypotheses (a.k.a. n-best list).
type SpeechRecognitionAlternative struct {
	// Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all the words
	// from the beginning of the audio.
	Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
	// Internal bookkeeping fields used by the protobuf runtime; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset sets the message to its zero value.
func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} }

// String returns the message rendered in the compact proto text format.
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*SpeechRecognitionAlternative) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto bytes and this
// message's index path within the descriptor.
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{15}
}

// XXX_Unmarshal parses the wire-format bytes b into m via the proto runtime.
func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
}

// XXX_Marshal delegates wire-format marshaling of m to the proto runtime;
// b is the destination buffer.
func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionAlternative.Merge(m, src)
}

// XXX_Size returns the size in bytes of the wire-format encoding of m.
func (m *SpeechRecognitionAlternative) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained in m.
func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechRecognitionAlternative caches marshaling state for
// SpeechRecognitionAlternative in the proto runtime.
var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo
| |
| func (m *SpeechRecognitionAlternative) GetTranscript() string { |
| if m != nil { |
| return m.Transcript |
| } |
| return "" |
| } |
| |
| func (m *SpeechRecognitionAlternative) GetConfidence() float32 { |
| if m != nil { |
| return m.Confidence |
| } |
| return 0 |
| } |
| |
| func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo { |
| if m != nil { |
| return m.Words |
| } |
| return nil |
| } |
| |
// Word-specific information for recognized words.
type WordInfo struct {
	// Time offset relative to the beginning of the audio,
	// and corresponding to the start of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time offset relative to the beginning of the audio,
	// and corresponding to the end of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// The word corresponding to this set of information.
	Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
	// The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A distinct integer value is assigned for every speaker within
	// the audio. This field specifies which one of those speakers was detected to
	// have spoken this word. Value ranges from '1' to diarization_speaker_count.
	// speaker_tag is set if enable_speaker_diarization = 'true' and only in the
	// top alternative.
	SpeakerTag int32 `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
	// Internal bookkeeping fields used by the protobuf runtime; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
| |
// Reset sets the message to its zero value.
func (m *WordInfo) Reset() { *m = WordInfo{} }

// String returns the message rendered in the compact proto text format.
func (m *WordInfo) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*WordInfo) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto bytes and this
// message's index path within the descriptor.
func (*WordInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_6adcab595cc29495, []int{16}
}

// XXX_Unmarshal parses the wire-format bytes b into m via the proto runtime.
func (m *WordInfo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_WordInfo.Unmarshal(m, b)
}

// XXX_Marshal delegates wire-format marshaling of m to the proto runtime;
// b is the destination buffer.
func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_WordInfo.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into m.
func (m *WordInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_WordInfo.Merge(m, src)
}

// XXX_Size returns the size in bytes of the wire-format encoding of m.
func (m *WordInfo) XXX_Size() int {
	return xxx_messageInfo_WordInfo.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained in m.
func (m *WordInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_WordInfo.DiscardUnknown(m)
}

// xxx_messageInfo_WordInfo caches marshaling state for WordInfo in the
// proto runtime.
var xxx_messageInfo_WordInfo proto.InternalMessageInfo
| |
| func (m *WordInfo) GetStartTime() *duration.Duration { |
| if m != nil { |
| return m.StartTime |
| } |
| return nil |
| } |
| |
| func (m *WordInfo) GetEndTime() *duration.Duration { |
| if m != nil { |
| return m.EndTime |
| } |
| return nil |
| } |
| |
| func (m *WordInfo) GetWord() string { |
| if m != nil { |
| return m.Word |
| } |
| return "" |
| } |
| |
| func (m *WordInfo) GetConfidence() float32 { |
| if m != nil { |
| return m.Confidence |
| } |
| return 0 |
| } |
| |
| func (m *WordInfo) GetSpeakerTag() int32 { |
| if m != nil { |
| return m.SpeakerTag |
| } |
| return 0 |
| } |
| |
// init registers this file's enum value maps and message types with the
// proto runtime under their fully-qualified proto names, enabling
// reflection-based lookup and (de)serialization.
func init() {
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType", RecognitionMetadata_InteractionType_name, RecognitionMetadata_InteractionType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance", RecognitionMetadata_MicrophoneDistance_name, RecognitionMetadata_MicrophoneDistance_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType", RecognitionMetadata_OriginalMediaType_name, RecognitionMetadata_OriginalMediaType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType", RecognitionMetadata_RecordingDeviceType_name, RecognitionMetadata_RecordingDeviceType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value)
	proto.RegisterType((*RecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.RecognizeRequest")
	proto.RegisterType((*LongRunningRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest")
	proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeRequest")
	proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionConfig")
	proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.RecognitionConfig")
	proto.RegisterType((*SpeakerDiarizationConfig)(nil), "google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig")
	proto.RegisterType((*RecognitionMetadata)(nil), "google.cloud.speech.v1p1beta1.RecognitionMetadata")
	proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1p1beta1.SpeechContext")
	proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1p1beta1.RecognitionAudio")
	proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.RecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeMetadata)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata")
	proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeResponse")
	proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionResult")
	proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionResult")
	proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative")
	proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1p1beta1.WordInfo")
}
| |
// init registers the compressed file descriptor under the original
// .proto source path so it can be resolved at runtime.
func init() {
	proto.RegisterFile("google/cloud/speech/v1p1beta1/cloud_speech.proto", fileDescriptor_6adcab595cc29495)
}
| |
| var fileDescriptor_6adcab595cc29495 = []byte{ |
| // 2421 bytes of a gzipped FileDescriptorProto |
| 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x4d, 0x73, 0xdb, 0xc6, |
| 0xf9, 0x37, 0x48, 0x51, 0x12, 0x1f, 0xbd, 0x41, 0x2b, 0x3b, 0x86, 0x69, 0x25, 0x71, 0x98, 0x7f, |
| 0x62, 0xff, 0x53, 0x97, 0xb4, 0x95, 0x34, 0x2f, 0x4e, 0x9a, 0x06, 0x02, 0x21, 0x0b, 0x1d, 0x92, |
| 0xe0, 0x2c, 0x29, 0xbb, 0xc9, 0x05, 0xb3, 0x02, 0x96, 0x14, 0xa6, 0x24, 0x80, 0x02, 0x4b, 0xbf, |
| 0x5d, 0xda, 0xc9, 0x17, 0xe8, 0xa1, 0xbd, 0xf6, 0xd2, 0x69, 0x8f, 0xbd, 0xf7, 0xd6, 0x99, 0x4e, |
| 0x7b, 0xc9, 0xb1, 0xed, 0xa1, 0xd3, 0x53, 0x3a, 0xd3, 0x4f, 0xd0, 0x53, 0x6f, 0xed, 0xec, 0x2e, |
| 0x40, 0x82, 0xa4, 0x2c, 0xc9, 0x9e, 0xe6, 0xd0, 0x1b, 0xf1, 0xbc, 0xfc, 0x9e, 0x97, 0x7d, 0xf6, |
| 0xd9, 0x7d, 0x96, 0x70, 0x67, 0x10, 0x86, 0x83, 0x21, 0xad, 0xbb, 0xc3, 0x70, 0xec, 0xd5, 0x93, |
| 0x88, 0x52, 0xf7, 0xa4, 0xfe, 0xe8, 0x6e, 0x74, 0xf7, 0x98, 0x32, 0x72, 0x57, 0x92, 0x1d, 0x49, |
| 0xae, 0x45, 0x71, 0xc8, 0x42, 0xf4, 0xaa, 0xd4, 0xa8, 0x09, 0x56, 0x2d, 0x65, 0x4d, 0x34, 0x2a, |
| 0xbb, 0x29, 0x20, 0x89, 0xfc, 0x3a, 0x09, 0x82, 0x90, 0x11, 0xe6, 0x87, 0x41, 0x22, 0x95, 0x2b, |
| 0x57, 0x73, 0x5c, 0x77, 0xe8, 0xd3, 0x80, 0xa5, 0x8c, 0xd7, 0x73, 0x8c, 0xbe, 0x4f, 0x87, 0x9e, |
| 0x73, 0x4c, 0x4f, 0xc8, 0x23, 0x3f, 0x8c, 0x53, 0x81, 0x6b, 0x39, 0x81, 0x98, 0x26, 0xe1, 0x38, |
| 0x76, 0x69, 0xca, 0xba, 0x7d, 0x76, 0x0c, 0x73, 0xd2, 0x6f, 0xa6, 0xd2, 0xc3, 0x30, 0x18, 0xc4, |
| 0xe3, 0x20, 0xf0, 0x83, 0x41, 0x3d, 0x8c, 0x68, 0x3c, 0xe3, 0x67, 0x66, 0x4d, 0x7c, 0x1d, 0x8f, |
| 0xfb, 0x75, 0x12, 0x3c, 0x4d, 0x59, 0xaf, 0xcd, 0xb3, 0xbc, 0xb1, 0xd4, 0x9d, 0x8b, 0x64, 0xc2, |
| 0x67, 0xfe, 0x88, 0x26, 0x8c, 0x8c, 0xa2, 0xb9, 0x1c, 0xc4, 0x91, 0x5b, 0x4f, 0x18, 0x61, 0xe3, |
| 0xd4, 0x68, 0xf5, 0x37, 0x0a, 0xa8, 0x98, 0xba, 0xe1, 0x20, 0xf0, 0x9f, 0x51, 0x4c, 0x7f, 0x34, |
| 0xa6, 0x09, 0x43, 0x2d, 0x58, 0x76, 0xc3, 0xa0, 0xef, 0x0f, 0x34, 0xe5, 0x86, 0x72, 0x6b, 0x6d, |
| 0xef, 0x4e, 0xed, 0xcc, 0xfc, 0xd7, 0x52, 0x00, 0xee, 0x90, 0x21, 0xf4, 0xf6, 0x8b, 0x5f, 0xeb, |
| 0x05, 0x9c, 0x82, 0xa0, 0xef, 0x43, 0x89, 0x8c, 0x3d, 0x3f, 0xd4, 0x0a, 0x02, 0xad, 0x7e, 0x71, |
| 0x34, 0x9d, 0xab, 0x49, 0x30, 0x09, 0x51, 0xfd, 0xad, 0x02, 0xd7, 0x9b, 0x61, 0x30, 0xc0, 0x32, |
| 0x8b, 0xff, 0x4b, 0xae, 0xff, 0x4e, 0x81, 0x6b, 0x5d, 0x16, 0x53, 0x32, 0x3a, 0xcd, 0xf1, 0x3e, |
| 0xa8, 0x49, 0xc6, 0x74, 0x66, 0x42, 0xf8, 0xe8, 0x1c, 0xa3, 0xf3, 0x98, 0xd3, 0x58, 0x0e, 0x2f, |
| 0xe1, 0xad, 0x09, 0xa8, 0x24, 0xa1, 0xb7, 0x60, 0x43, 0xb8, 0xc3, 0x6d, 0x30, 0x1a, 0x30, 0x11, |
| 0xd9, 0xfa, 0xe1, 0x25, 0xbc, 0x2e, 0xc8, 0x86, 0xa4, 0xee, 0xef, 0xc0, 0xf6, 0xd4, 0x9d, 0x58, |
| 0xfa, 0xc8, 0x23, 0xa8, 0x3c, 0xdf, 0xda, 0x7f, 0x3b, 0xf7, 0xff, 0x0f, 0x6a, 0xe2, 0x07, 0x83, |
| 0x21, 0x75, 0xc6, 0x8c, 0xd1, 0x98, 0x04, 0x2e, 0x15, 0xce, 0xae, 0xe2, 0x2d, 0x49, 0x3f, 0xca, |
| 0xc8, 0xe8, 0x26, 0x6c, 0xf9, 0x01, 0xa3, 0xb1, 0x3f, 0x72, 0x62, 0x9a, 0x8c, 0x87, 0x2c, 0xd1, |
| 0x8a, 0x42, 0x72, 0x33, 0x25, 0x63, 0x49, 0xad, 0xfe, 0x1e, 0x60, 0x7b, 0xd1, 0xf1, 0x2f, 0x60, |
| 0x95, 0x06, 0x6e, 0xe8, 0xf9, 0x81, 0x74, 0x7d, 0x73, 0xef, 0xd3, 0x17, 0x75, 0xbd, 0x26, 0xd6, |
| 0xdb, 0x4c, 0x51, 0xf0, 0x04, 0x0f, 0xbd, 0x03, 0xdb, 0x09, 0x19, 0x45, 0x43, 0xea, 0xc4, 0x84, |
| 0x51, 0xe7, 0x84, 0xc6, 0xec, 0x99, 0x08, 0xa3, 0x84, 0xb7, 0x24, 0x03, 0x13, 0x46, 0x0f, 0x39, |
| 0x19, 0xd5, 0x60, 0x27, 0x5d, 0x9b, 0x13, 0x12, 0x04, 0x74, 0xe8, 0xb8, 0xe1, 0x38, 0x60, 0xda, |
| 0x8a, 0x90, 0xde, 0x96, 0xeb, 0x23, 0x39, 0x06, 0x67, 0xa0, 0x1e, 0xdc, 0xa4, 0x01, 0x39, 0x1e, |
| 0x52, 0x27, 0xa1, 0x11, 0x11, 0xf8, 0xf1, 0xd4, 0x31, 0x27, 0xa2, 0x71, 0x86, 0xa4, 0xad, 0x8b, |
| 0x74, 0xbc, 0x29, 0xc5, 0xbb, 0xa9, 0x74, 0x2e, 0x8a, 0x0e, 0x8d, 0x53, 0x68, 0x74, 0x0b, 0x36, |
| 0x86, 0x24, 0x18, 0x8c, 0xc9, 0x80, 0x3a, 0x6e, 0xe8, 0x51, 0x91, 0xca, 0xb2, 0x5c, 0x9b, 0xf5, |
| 0x8c, 0x63, 0x84, 0x1e, 0x45, 0x9f, 0x40, 0x85, 0x0c, 0x19, 0x8d, 0x03, 0xc2, 0xfc, 0x47, 0xd4, |
| 0x99, 0xd1, 0x4a, 0x34, 0x74, 0xa3, 0x78, 0xab, 0x8c, 0xb5, 0x9c, 0x44, 0x33, 0xa7, 0x9c, 0xf0, |
| 0xf5, 0x1d, 0x91, 0x27, 0x4e, 0x8e, 0x9f, 0x68, 0x4b, 0x32, 0x31, 0x23, 0xf2, 0x44, 0xcf, 0x91, |
| 0xb9, 0x68, 0x14, 0x87, 0x7d, 0x12, 0xf8, 0xec, 0xa9, 0xd3, 0xf7, 0x39, 0x4b, 0x2b, 0xc9, 0x52, |
| 0x98, 0xd0, 0x0f, 0x04, 0x19, 0xd9, 0x00, 0xc4, 0x23, 0x91, 0x3c, 0x02, 0xb4, 0xcb, 0x17, 0xda, |
| 0xb6, 0x5d, 0x41, 0xd0, 0x27, 0x6a, 0x38, 0x07, 0x81, 0x8e, 0x60, 0x4b, 0x2a, 0xc8, 0x1d, 0xf3, |
| 0x84, 0x25, 0xda, 0xf2, 0x8d, 0xe2, 0xad, 0xb5, 0xbd, 0xdb, 0x17, 0x42, 0x35, 0xa4, 0x12, 0xde, |
| 0x4c, 0xf2, 0x9f, 0x09, 0xfa, 0x00, 0xb4, 0x74, 0xed, 0x1e, 0x87, 0xb1, 0xe7, 0xf0, 0x86, 0xed, |
| 0x84, 0xfd, 0x7e, 0x42, 0x59, 0xa2, 0xad, 0x8a, 0xd0, 0xae, 0x48, 0xfe, 0xc3, 0x30, 0xf6, 0x7a, |
| 0xfe, 0x88, 0xda, 0x92, 0x89, 0xde, 0x83, 0x57, 0xf2, 0x8a, 0x62, 0xb3, 0x78, 0x94, 0x6f, 0x8e, |
| 0x2d, 0xa1, 0x76, 0x79, 0xaa, 0x66, 0x4c, 0x78, 0xe8, 0x33, 0xd8, 0x4d, 0xb5, 0xc8, 0x98, 0x85, |
| 0x23, 0xc2, 0x7c, 0xd7, 0x89, 0xc6, 0x81, 0xcb, 0xc6, 0x32, 0x51, 0x6b, 0x42, 0xb7, 0x22, 0x65, |
| 0xf4, 0x4c, 0xa4, 0x33, 0x95, 0x40, 0x9f, 0x41, 0x25, 0x2b, 0xb6, 0x88, 0x92, 0x1f, 0xd2, 0xd8, |
| 0xf1, 0x7c, 0x12, 0xfb, 0xcf, 0xa4, 0xbe, 0xca, 0xf5, 0xf7, 0x0b, 0x9a, 0x82, 0xd3, 0xb0, 0xba, |
| 0x52, 0xa8, 0x31, 0x95, 0x41, 0x9f, 0xc2, 0xb5, 0x9c, 0xca, 0x04, 0x46, 0x16, 0xf9, 0x36, 0x5f, |
| 0x79, 0x01, 0x70, 0x35, 0x27, 0x94, 0xa2, 0xc8, 0x72, 0xef, 0x03, 0xca, 0xeb, 0xa7, 0xbd, 0x66, |
| 0x47, 0x2c, 0xf1, 0x07, 0xe7, 0x2f, 0xc6, 0x9c, 0x3b, 0x72, 0xdf, 0xe2, 0x6d, 0x6f, 0x9e, 0x84, |
| 0xda, 0xb0, 0x3a, 0xa2, 0x8c, 0x78, 0x84, 0x11, 0xad, 0x2c, 0xd0, 0xf7, 0x2e, 0xde, 0x0e, 0x5a, |
| 0xa9, 0x26, 0x9e, 0x60, 0xa0, 0xcb, 0x50, 0x1a, 0x85, 0x1e, 0x1d, 0x6a, 0x1b, 0x7c, 0x23, 0x61, |
| 0xf9, 0x81, 0xde, 0x80, 0xf5, 0x71, 0x42, 0x1d, 0x1a, 0x9c, 0xf0, 0x16, 0xe6, 0x69, 0x9b, 0x62, |
| 0x05, 0xd6, 0xc6, 0x09, 0x35, 0x53, 0x52, 0xf5, 0xe7, 0x0a, 0x6c, 0xcc, 0xf4, 0x15, 0xa4, 0xc1, |
| 0x65, 0xb3, 0x6d, 0xd8, 0x0d, 0xab, 0x7d, 0xdf, 0x39, 0x6a, 0x77, 0x3b, 0xa6, 0x61, 0x1d, 0x58, |
| 0x66, 0x43, 0xbd, 0x84, 0xd6, 0x61, 0xb5, 0x69, 0xb5, 0x4d, 0x1d, 0xdf, 0x7d, 0x5f, 0x55, 0xd0, |
| 0x2a, 0x2c, 0x1d, 0x34, 0x75, 0x43, 0x2d, 0xa0, 0x32, 0x94, 0x5a, 0x47, 0x4d, 0xfd, 0xa1, 0x5a, |
| 0x44, 0x2b, 0x50, 0xd4, 0x5b, 0x58, 0x5d, 0x42, 0x00, 0xcb, 0x7a, 0x0b, 0x3b, 0x0f, 0xf7, 0xd5, |
| 0x12, 0xd7, 0xb3, 0xef, 0xdf, 0x77, 0xec, 0xce, 0x51, 0x57, 0x5d, 0x46, 0x15, 0x78, 0xa5, 0xdb, |
| 0x31, 0xcd, 0x1f, 0x38, 0x0f, 0xad, 0xde, 0xa1, 0x73, 0x68, 0xea, 0x0d, 0x13, 0x3b, 0xfb, 0x9f, |
| 0xf7, 0x4c, 0x75, 0x85, 0xab, 0xb7, 0x3a, 0xef, 0xaa, 0xab, 0xd5, 0xbf, 0x2a, 0xa0, 0x3d, 0x2f, |
| 0x9f, 0xbc, 0x27, 0x9c, 0x51, 0x26, 0x8a, 0x08, 0xf2, 0xf9, 0x25, 0xf2, 0x0e, 0x6c, 0x8f, 0xfc, |
| 0xf9, 0xd2, 0x48, 0xbb, 0xe5, 0xc8, 0x9f, 0x2d, 0x07, 0x2e, 0x4b, 0x9e, 0xcc, 0xc9, 0x16, 0x27, |
| 0x0d, 0x64, 0x46, 0xf6, 0x6d, 0x58, 0xcb, 0xe4, 0x18, 0x19, 0x88, 0xde, 0x51, 0xda, 0x2f, 0x7d, |
| 0xad, 0x17, 0x35, 0x05, 0x43, 0xca, 0xe9, 0x91, 0x41, 0xf5, 0xdf, 0x65, 0xd8, 0x39, 0x65, 0x31, |
| 0xd1, 0x08, 0x54, 0x71, 0x92, 0x10, 0x57, 0x94, 0x1e, 0x7b, 0x1a, 0xd1, 0xf4, 0xa4, 0xd8, 0x7f, |
| 0xf1, 0xd2, 0xa8, 0x59, 0x53, 0xa8, 0xde, 0xd3, 0x88, 0xe2, 0x2d, 0x7f, 0x96, 0x80, 0x3e, 0x85, |
| 0x5d, 0x3f, 0xf0, 0xc6, 0x09, 0x8b, 0x9f, 0x3a, 0x01, 0xf1, 0xdd, 0x44, 0xb4, 0x54, 0x27, 0xec, |
| 0x3b, 0xf2, 0x36, 0xc2, 0xa3, 0xdc, 0xc0, 0x5a, 0x26, 0xd3, 0xe6, 0x22, 0xbc, 0xa9, 0xda, 0x7d, |
| 0x51, 0x2e, 0xe8, 0x11, 0xec, 0x8c, 0x7c, 0x37, 0x0e, 0xa3, 0x93, 0x30, 0xa0, 0x8e, 0xe7, 0x27, |
| 0x4c, 0x9c, 0x9e, 0x4b, 0xc2, 0x63, 0xf3, 0x25, 0x3c, 0x6e, 0x4d, 0xd0, 0x1a, 0x29, 0x18, 0x46, |
| 0xa3, 0x05, 0x1a, 0x62, 0xb0, 0x13, 0xc6, 0xfe, 0xc0, 0x0f, 0xc8, 0xd0, 0x19, 0x51, 0xcf, 0x27, |
| 0x32, 0x53, 0x25, 0x61, 0xb7, 0xf1, 0x12, 0x76, 0xed, 0x14, 0xad, 0xc5, 0xc1, 0x44, 0xae, 0xb6, |
| 0xc3, 0x79, 0x12, 0x7a, 0x06, 0x57, 0xf8, 0xb1, 0x17, 0xf3, 0x1d, 0xe2, 0x78, 0xf4, 0x91, 0xef, |
| 0x52, 0x69, 0x77, 0x59, 0xd8, 0x3d, 0x78, 0x09, 0xbb, 0x38, 0xc3, 0x6b, 0x08, 0x38, 0x61, 0x79, |
| 0x27, 0x5e, 0x24, 0xa2, 0xbd, 0x53, 0x6c, 0x07, 0x64, 0x44, 0xc5, 0xa1, 0x5d, 0x5e, 0xd0, 0x69, |
| 0x93, 0x11, 0x45, 0xb7, 0x01, 0x4d, 0xb3, 0xc4, 0xfb, 0xbe, 0x70, 0x76, 0x55, 0x28, 0xa8, 0x93, |
| 0xf0, 0xfc, 0x91, 0xb4, 0x70, 0x13, 0x36, 0xc2, 0xe3, 0xfe, 0x38, 0x71, 0x09, 0xa3, 0x9e, 0xe3, |
| 0x7b, 0xa2, 0x25, 0x15, 0x45, 0xa7, 0x5c, 0x9f, 0x32, 0x2c, 0x0f, 0xbd, 0x0e, 0x6b, 0xf2, 0xf6, |
| 0xc0, 0xc2, 0xc8, 0x77, 0x35, 0x10, 0x78, 0x20, 0x48, 0x3d, 0x4e, 0xa9, 0xfe, 0x51, 0x81, 0xad, |
| 0xb9, 0xd2, 0x43, 0x37, 0x60, 0xd7, 0x6a, 0xf7, 0x4c, 0xac, 0x1b, 0x3d, 0xcb, 0x6e, 0x3b, 0xbd, |
| 0xcf, 0x3b, 0xe6, 0x5c, 0x63, 0xd9, 0x04, 0x68, 0x58, 0x5d, 0xe3, 0xa8, 0xdb, 0xb5, 0xec, 0xb6, |
| 0xaa, 0x20, 0x15, 0xd6, 0x3b, 0xd8, 0xec, 0x9a, 0xed, 0x9e, 0xce, 0x55, 0xd4, 0x02, 0x97, 0xe8, |
| 0x1c, 0xda, 0x6d, 0xd3, 0x31, 0xf4, 0x66, 0x53, 0x2d, 0xa2, 0x0d, 0x28, 0x3f, 0xb0, 0x2d, 0xc3, |
| 0x6c, 0xe9, 0x56, 0x53, 0x5d, 0x42, 0xd7, 0xe1, 0x6a, 0x07, 0xdb, 0x07, 0xa6, 0x00, 0xd0, 0x9b, |
| 0xcd, 0xcf, 0x9d, 0x0e, 0xb6, 0x1b, 0x47, 0x86, 0xd9, 0x50, 0x4b, 0x1c, 0x4d, 0xc8, 0x3a, 0x5d, |
| 0x53, 0xc7, 0xc6, 0xa1, 0xba, 0x8c, 0xb6, 0x61, 0x43, 0x52, 0x0c, 0xbb, 0xd5, 0xd2, 0xdb, 0x0d, |
| 0x75, 0x85, 0x03, 0x36, 0x2c, 0x23, 0xb5, 0xb7, 0x5a, 0xf5, 0x00, 0x2d, 0xd6, 0x23, 0x7a, 0x13, |
| 0x5e, 0x6f, 0x59, 0x06, 0xb6, 0xa5, 0x2b, 0x0d, 0xab, 0xdb, 0xd3, 0xdb, 0xc6, 0x7c, 0x30, 0x1b, |
| 0x50, 0xe6, 0x3d, 0xf2, 0xc0, 0x32, 0x9b, 0x0d, 0x55, 0xe1, 0xcd, 0xaf, 0x65, 0x35, 0xe4, 0x57, |
| 0x81, 0x7f, 0x1d, 0x64, 0xbc, 0x62, 0xb5, 0x0d, 0xdb, 0x0b, 0xd5, 0xc7, 0x8d, 0xd8, 0xd8, 0xba, |
| 0x6f, 0xb5, 0xf5, 0xa6, 0xd3, 0x32, 0x1b, 0x96, 0x7e, 0x5a, 0xc6, 0xca, 0x50, 0xd2, 0x8f, 0x1a, |
| 0x96, 0xad, 0x2a, 0xfc, 0xe7, 0x03, 0xab, 0x61, 0xda, 0x6a, 0xa1, 0xfa, 0x2b, 0x45, 0xb6, 0x96, |
| 0xf9, 0x0a, 0x7a, 0x0b, 0xde, 0xc0, 0xa6, 0x61, 0x63, 0xd1, 0xd3, 0x1b, 0xe6, 0x03, 0x1e, 0xfa, |
| 0xe9, 0xcb, 0xd0, 0x6d, 0xe9, 0xb8, 0x27, 0xc2, 0x53, 0x15, 0xb4, 0x0c, 0x85, 0x8e, 0x91, 0x4f, |
| 0x3e, 0xef, 0xfe, 0x6a, 0x11, 0xad, 0xc1, 0xca, 0x03, 0xf3, 0xd0, 0x32, 0x9a, 0xa6, 0xba, 0xc4, |
| 0x8f, 0x0b, 0xbb, 0x77, 0x68, 0x62, 0xc7, 0x3e, 0xea, 0x35, 0x6c, 0x1b, 0xa7, 0xf8, 0x6a, 0x09, |
| 0x5d, 0x85, 0x1d, 0xc9, 0xb1, 0xda, 0x79, 0xc6, 0x72, 0xf5, 0x7b, 0xb0, 0x31, 0x73, 0x71, 0x41, |
| 0x1a, 0xac, 0x44, 0x27, 0x31, 0x49, 0x68, 0xa2, 0x29, 0xe2, 0x46, 0x97, 0x7d, 0xf2, 0x73, 0xed, |
| 0x38, 0x0c, 0x13, 0x26, 0xfa, 0x4a, 0x01, 0xcb, 0x8f, 0x2a, 0x9e, 0x0c, 0x94, 0x93, 0x31, 0x08, |
| 0x55, 0x60, 0x25, 0x1b, 0x37, 0x94, 0x74, 0xdc, 0xc8, 0x08, 0x08, 0x41, 0x71, 0x1c, 0xfb, 0xa2, |
| 0xc9, 0x97, 0x0f, 0x2f, 0x61, 0xfe, 0xb1, 0xbf, 0x09, 0x72, 0x1a, 0x71, 0xe4, 0x14, 0x5d, 0xa5, |
| 0x93, 0x5b, 0x3b, 0x1f, 0x98, 0x92, 0x28, 0x0c, 0x12, 0x8a, 0x3a, 0xb0, 0x92, 0x5d, 0xf6, 0x0b, |
| 0xe2, 0x42, 0xf6, 0xfe, 0x85, 0x2e, 0x64, 0x39, 0xe7, 0xe4, 0x54, 0x80, 0x33, 0x98, 0x6a, 0x04, |
| 0xbb, 0xa7, 0xcf, 0x96, 0xdf, 0x98, 0xc5, 0xbf, 0x2b, 0xa7, 0x9b, 0x9c, 0x1c, 0x3c, 0xf2, 0xe6, |
| 0x3b, 0x88, 0x69, 0x92, 0xf0, 0xfb, 0xbc, 0x9b, 0xa5, 0xb0, 0x24, 0x6e, 0xbe, 0x82, 0xde, 0x91, |
| 0x64, 0xf4, 0x11, 0x40, 0xc2, 0x48, 0xcc, 0xc4, 0x5d, 0x32, 0x1d, 0x58, 0x2b, 0x99, 0x83, 0xd9, |
| 0xcb, 0x40, 0xad, 0x97, 0xbd, 0x0c, 0xe0, 0xb2, 0x90, 0xe6, 0xdf, 0xa8, 0x01, 0xea, 0x90, 0x24, |
| 0xcc, 0x19, 0x47, 0x1e, 0x1f, 0x22, 0x04, 0x40, 0xf1, 0x5c, 0x80, 0x4d, 0xae, 0x73, 0x24, 0x54, |
| 0x04, 0xca, 0x15, 0xb9, 0x92, 0x4b, 0xd9, 0xb8, 0x50, 0x14, 0x8b, 0x59, 0xfd, 0xba, 0xb0, 0x38, |
| 0x35, 0xe6, 0x92, 0x7a, 0x0b, 0x4a, 0x34, 0x8e, 0xc3, 0x38, 0x1d, 0x1a, 0x51, 0x66, 0x30, 0x8e, |
| 0xdc, 0x5a, 0x57, 0x3c, 0x55, 0x60, 0x29, 0x80, 0xba, 0xf3, 0xe9, 0x7f, 0x99, 0xc9, 0x78, 0x6e, |
| 0x05, 0xd0, 0x18, 0xb6, 0xd3, 0xeb, 0x3d, 0x7d, 0x44, 0x03, 0x26, 0x7b, 0xb1, 0x3c, 0x28, 0xad, |
| 0x17, 0x84, 0x9f, 0x06, 0x95, 0x2e, 0xbc, 0xc9, 0x11, 0xe5, 0x09, 0x9f, 0xcc, 0x12, 0xaa, 0x4d, |
| 0xd8, 0x9a, 0x93, 0x41, 0xbb, 0xa0, 0xf1, 0xbb, 0x97, 0x71, 0xe8, 0x98, 0x0f, 0xcc, 0x76, 0x6f, |
| 0x6e, 0xff, 0x5f, 0x87, 0xab, 0x66, 0xbb, 0xe1, 0xd8, 0x07, 0x4e, 0xd7, 0x6a, 0xdf, 0x6f, 0x9a, |
| 0xce, 0x51, 0x8f, 0xb7, 0xed, 0xb6, 0x61, 0xaa, 0x4a, 0xf5, 0x0f, 0x85, 0xd3, 0x07, 0x73, 0x19, |
| 0x2c, 0x72, 0x60, 0x7d, 0x66, 0xca, 0x52, 0x44, 0xf6, 0x3e, 0x7e, 0xd1, 0xe2, 0xcd, 0x8d, 0x64, |
| 0x78, 0x06, 0x10, 0x5d, 0x83, 0x55, 0x3f, 0x71, 0xfa, 0xbc, 0x57, 0xa6, 0x23, 0xfa, 0x8a, 0x9f, |
| 0x1c, 0xf0, 0x4f, 0xb4, 0x0b, 0xbc, 0xce, 0x8e, 0xfd, 0xa1, 0xcf, 0x9e, 0x8a, 0x9a, 0x2a, 0xe0, |
| 0x29, 0x01, 0xe9, 0xb0, 0x25, 0x17, 0xc2, 0xa1, 0x81, 0x1c, 0x82, 0x44, 0xee, 0xd7, 0xf6, 0xae, |
| 0x2d, 0xd4, 0x5d, 0x23, 0x7d, 0xf2, 0xc2, 0x1b, 0x52, 0xc3, 0x0c, 0xc4, 0x58, 0xc4, 0x8f, 0xbd, |
| 0x6c, 0x5c, 0x9e, 0x5c, 0xed, 0x30, 0xa4, 0xa4, 0x1e, 0x19, 0x2c, 0xce, 0xb3, 0xcb, 0xd3, 0x02, |
| 0x9d, 0x99, 0x67, 0xf9, 0x01, 0x79, 0xf5, 0x39, 0x5b, 0xf6, 0x9b, 0xcf, 0xe1, 0x5c, 0x1c, 0x85, |
| 0xf3, 0xe3, 0x28, 0x3d, 0x2f, 0x8e, 0x5f, 0x28, 0xb0, 0x7b, 0x96, 0x65, 0xf4, 0x1a, 0x00, 0x8b, |
| 0x49, 0x90, 0xb8, 0xb1, 0x1f, 0xc9, 0x7e, 0x52, 0xc6, 0x39, 0x0a, 0xe7, 0xe7, 0xe6, 0xca, 0x82, |
| 0x58, 0xb5, 0x1c, 0x05, 0x7d, 0x17, 0x4a, 0x7c, 0xf8, 0x4c, 0xb4, 0xa2, 0xc8, 0xc2, 0xcd, 0x73, |
| 0xb2, 0xc0, 0x67, 0x51, 0x2b, 0xe8, 0x87, 0x58, 0x6a, 0x55, 0xff, 0xa2, 0xc0, 0x6a, 0x46, 0x43, |
| 0x1f, 0xce, 0xb4, 0x2d, 0xe5, 0xbc, 0xd5, 0xcf, 0x75, 0xad, 0xf7, 0x60, 0x75, 0x52, 0x35, 0x85, |
| 0xf3, 0xf4, 0x56, 0x68, 0x5a, 0x2f, 0x08, 0x96, 0xb8, 0x17, 0xf2, 0x55, 0x03, 0x8b, 0xdf, 0x73, |
| 0xf1, 0x2e, 0x2d, 0xc4, 0xfb, 0x7f, 0xa7, 0x8d, 0x0f, 0x22, 0xf1, 0xb9, 0xe1, 0x61, 0xef, 0xcb, |
| 0x12, 0x2c, 0xcb, 0xb4, 0xa3, 0x5f, 0x2b, 0x50, 0x9e, 0x74, 0x05, 0x74, 0xc1, 0x67, 0xc3, 0xc9, |
| 0x63, 0x60, 0xe5, 0xce, 0xc5, 0x15, 0x64, 0xc3, 0xa9, 0x7e, 0xe7, 0x6f, 0xfa, 0xba, 0x1c, 0x88, |
| 0x6f, 0x8b, 0x93, 0xf3, 0xcb, 0x3f, 0xff, 0xe3, 0x67, 0x85, 0x1b, 0xd5, 0xeb, 0xb9, 0x37, 0x69, |
| 0x89, 0x72, 0x2f, 0xce, 0x74, 0xef, 0x29, 0xef, 0xa0, 0x7f, 0x2a, 0x70, 0xf9, 0xb4, 0xf3, 0x07, |
| 0xdd, 0x3b, 0xc7, 0x83, 0x33, 0xde, 0x60, 0x2b, 0xaf, 0x66, 0xba, 0xb9, 0xe7, 0xee, 0x9a, 0x9d, |
| 0x3d, 0x77, 0x57, 0x7f, 0xfc, 0x95, 0xfe, 0xc9, 0x39, 0x07, 0xed, 0x99, 0x67, 0xe2, 0x69, 0x91, |
| 0x7e, 0xab, 0xfa, 0xf6, 0x62, 0xa4, 0x39, 0xf3, 0x33, 0x41, 0xff, 0x54, 0x01, 0xb4, 0xd8, 0xbb, |
| 0xd1, 0x87, 0x2f, 0xd1, 0xee, 0x65, 0xc0, 0x1f, 0xbd, 0xf4, 0x41, 0x51, 0xbd, 0x74, 0x4b, 0xb9, |
| 0xa3, 0x54, 0xac, 0xaf, 0xf4, 0x2b, 0xa9, 0x96, 0x44, 0x22, 0x91, 0x9f, 0xd4, 0xdc, 0x70, 0xf4, |
| 0x27, 0xbd, 0x76, 0xc2, 0x58, 0x94, 0xdc, 0xab, 0xd7, 0x1f, 0x3f, 0x7e, 0x3c, 0xc7, 0xac, 0x93, |
| 0x31, 0x3b, 0x91, 0xff, 0x40, 0x7c, 0x3b, 0x1a, 0x12, 0xd6, 0x0f, 0xe3, 0xd1, 0xfe, 0x4f, 0x14, |
| 0x78, 0xc3, 0x0d, 0x47, 0x67, 0x7b, 0xb4, 0xbf, 0x26, 0xeb, 0xb4, 0xc3, 0xb7, 0x49, 0x47, 0xf9, |
| 0xc2, 0x48, 0xa5, 0x07, 0x21, 0xef, 0x23, 0xb5, 0x30, 0x1e, 0xd4, 0x07, 0x34, 0x10, 0x9b, 0xa8, |
| 0x3e, 0xb5, 0xf9, 0x9c, 0x3f, 0x3b, 0x3e, 0x96, 0x84, 0x7f, 0x29, 0xca, 0x2f, 0x0b, 0xc5, 0xfb, |
| 0x46, 0xf7, 0x78, 0x59, 0x28, 0xbe, 0xfb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x96, 0x3c, 0x75, |
| 0x77, 0xe8, 0x19, 0x00, 0x00, |
| } |
| |
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// It fails to compile against a grpc-go release older than support level 6.
const _ = grpc.SupportPackageIsVersion6
| |
// SpeechClient is the client API for Speech service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SpeechClient interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	// For more information on asynchronous speech recognition, see the
	// [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
	LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
}
| |
// speechClient implements SpeechClient by issuing RPCs over the
// wrapped gRPC client connection.
type speechClient struct {
	cc grpc.ClientConnInterface
}
| |
| func NewSpeechClient(cc grpc.ClientConnInterface) SpeechClient { |
| return &speechClient{cc} |
| } |
| |
| func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) { |
| out := new(RecognizeResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/Recognize", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { |
| out := new(longrunning.Operation) |
| err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) { |
| stream, err := c.cc.NewStream(ctx, &_Speech_serviceDesc.Streams[0], "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", opts...) |
| if err != nil { |
| return nil, err |
| } |
| x := &speechStreamingRecognizeClient{stream} |
| return x, nil |
| } |
| |
// Speech_StreamingRecognizeClient is the client-side view of the
// bidirectional StreamingRecognize stream: it sends requests and
// receives responses over the embedded grpc.ClientStream.
type Speech_StreamingRecognizeClient interface {
	Send(*StreamingRecognizeRequest) error
	Recv() (*StreamingRecognizeResponse, error)
	grpc.ClientStream
}
| |
// speechStreamingRecognizeClient adapts a raw grpc.ClientStream to the
// typed Speech_StreamingRecognizeClient interface.
type speechStreamingRecognizeClient struct {
	grpc.ClientStream
}
| |
| func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error { |
| return x.ClientStream.SendMsg(m) |
| } |
| |
| func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) { |
| m := new(StreamingRecognizeResponse) |
| if err := x.ClientStream.RecvMsg(m); err != nil { |
| return nil, err |
| } |
| return m, nil |
| } |
| |
// SpeechServer is the server API for Speech service.
type SpeechServer interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	// For more information on asynchronous speech recognition, see the
	// [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
	LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(Speech_StreamingRecognizeServer) error
}
| |
// UnimplementedSpeechServer can be embedded to have forward compatible implementations.
// Embedding it lets a server type compile even when new methods are added to
// SpeechServer; unoverridden methods return codes.Unimplemented.
type UnimplementedSpeechServer struct {
}
| |
// Recognize returns codes.Unimplemented; override to provide behavior.
func (*UnimplementedSpeechServer) Recognize(ctx context.Context, req *RecognizeRequest) (*RecognizeResponse, error) {
	return nil, status1.Errorf(codes.Unimplemented, "method Recognize not implemented")
}

// LongRunningRecognize returns codes.Unimplemented; override to provide behavior.
func (*UnimplementedSpeechServer) LongRunningRecognize(ctx context.Context, req *LongRunningRecognizeRequest) (*longrunning.Operation, error) {
	return nil, status1.Errorf(codes.Unimplemented, "method LongRunningRecognize not implemented")
}

// StreamingRecognize returns codes.Unimplemented; override to provide behavior.
func (*UnimplementedSpeechServer) StreamingRecognize(srv Speech_StreamingRecognizeServer) error {
	return status1.Errorf(codes.Unimplemented, "method StreamingRecognize not implemented")
}
| |
// RegisterSpeechServer registers srv's Speech service implementation with
// the given gRPC server using the generated service descriptor.
func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) {
	s.RegisterService(&_Speech_serviceDesc, srv)
}
| |
| func _Speech_Recognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(RecognizeRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpeechServer).Recognize(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.speech.v1p1beta1.Speech/Recognize", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpeechServer).Recognize(ctx, req.(*RecognizeRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _Speech_LongRunningRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(LongRunningRecognizeRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(SpeechServer).LongRunningRecognize(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(SpeechServer).LongRunningRecognize(ctx, req.(*LongRunningRecognizeRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
// _Speech_StreamingRecognize_Handler wraps the raw server stream in the
// typed adapter and hands it to the SpeechServer implementation.
func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream})
}
| |
// Speech_StreamingRecognizeServer is the server-side view of the
// bidirectional StreamingRecognize stream: it receives requests and
// sends responses over the embedded grpc.ServerStream.
type Speech_StreamingRecognizeServer interface {
	Send(*StreamingRecognizeResponse) error
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}
| |
// speechStreamingRecognizeServer adapts a raw grpc.ServerStream to the
// typed Speech_StreamingRecognizeServer interface.
type speechStreamingRecognizeServer struct {
	grpc.ServerStream
}
| |
| func (x *speechStreamingRecognizeServer) Send(m *StreamingRecognizeResponse) error { |
| return x.ServerStream.SendMsg(m) |
| } |
| |
| func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) { |
| m := new(StreamingRecognizeRequest) |
| if err := x.ServerStream.RecvMsg(m); err != nil { |
| return nil, err |
| } |
| return m, nil |
| } |
| |
// _Speech_serviceDesc wires the Speech service name, its unary method
// handlers, and its bidirectional streaming handler into the descriptor
// consumed by RegisterSpeechServer (and by the client's NewStream call,
// which references Streams[0]).
var _Speech_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.speech.v1p1beta1.Speech",
	HandlerType: (*SpeechServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Recognize",
			Handler:    _Speech_Recognize_Handler,
		},
		{
			MethodName: "LongRunningRecognize",
			Handler:    _Speech_LongRunningRecognize_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "StreamingRecognize",
			Handler:       _Speech_StreamingRecognize_Handler,
			// Both directions stream: audio chunks up, interim results down.
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "google/cloud/speech/v1p1beta1/cloud_speech.proto",
}