speech

package
v0.0.0-...-5fc9ac5
Published: Nov 7, 2018 License: Apache-2.0 Imports: 12 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var RecognitionConfig_AudioEncoding_name = map[int32]string{
	0: "ENCODING_UNSPECIFIED",
	1: "LINEAR16",
	2: "FLAC",
	3: "MULAW",
	4: "AMR",
	5: "AMR_WB",
	6: "OGG_OPUS",
	7: "SPEEX_WITH_HEADER_BYTE",
}
var RecognitionConfig_AudioEncoding_value = map[string]int32{
	"ENCODING_UNSPECIFIED":   0,
	"LINEAR16":               1,
	"FLAC":                   2,
	"MULAW":                  3,
	"AMR":                    4,
	"AMR_WB":                 5,
	"OGG_OPUS":               6,
	"SPEEX_WITH_HEADER_BYTE": 7,
}
var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{
	0: "SPEECH_EVENT_UNSPECIFIED",
	1: "END_OF_SINGLE_UTTERANCE",
}
var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{
	"SPEECH_EVENT_UNSPECIFIED": 0,
	"END_OF_SINGLE_UTTERANCE":  1,
}

Functions

func RegisterSpeechServer

func RegisterSpeechServer(s *grpc.Server, srv SpeechServer)

Types

type LongRunningRecognizeMetadata

type LongRunningRecognizeMetadata struct {
	// Approximate percentage of audio processed thus far. Guaranteed to be 100
	// when the audio is fully processed and the results are available.
	ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
	// Time when the request was received.
	StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time of the most recent processing update.
	LastUpdateTime       *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

Describes the progress of a long-running `LongRunningRecognize` call. It is included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
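For illustration only, a minimal sketch of unpacking this metadata from a `google.longrunning.Operation` while the call is still running. It assumes the usual import alias `speechpb` for this package (google.golang.org/genproto/googleapis/cloud/speech/v1) and an Operation obtained elsewhere from the Operations service; error handling is abbreviated.

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

// printProgress unpacks the Operation's metadata Any into a
// LongRunningRecognizeMetadata and prints the current progress.
func printProgress(op *longrunningpb.Operation) error {
	var md speechpb.LongRunningRecognizeMetadata
	if err := ptypes.UnmarshalAny(op.GetMetadata(), &md); err != nil {
		return err
	}
	fmt.Printf("progress: %d%%\n", md.GetProgressPercent())
	return nil
}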

func (*LongRunningRecognizeMetadata) Descriptor

func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int)

func (*LongRunningRecognizeMetadata) GetLastUpdateTime

func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp

func (*LongRunningRecognizeMetadata) GetProgressPercent

func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32

func (*LongRunningRecognizeMetadata) GetStartTime

func (m *LongRunningRecognizeMetadata) GetStartTime() *timestamp.Timestamp

func (*LongRunningRecognizeMetadata) ProtoMessage

func (*LongRunningRecognizeMetadata) ProtoMessage()

func (*LongRunningRecognizeMetadata) Reset

func (m *LongRunningRecognizeMetadata) Reset()

func (*LongRunningRecognizeMetadata) String

func (m *LongRunningRecognizeMetadata) String() string

func (*LongRunningRecognizeMetadata) XXX_DiscardUnknown

func (m *LongRunningRecognizeMetadata) XXX_DiscardUnknown()

func (*LongRunningRecognizeMetadata) XXX_Marshal

func (m *LongRunningRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LongRunningRecognizeMetadata) XXX_Merge

func (m *LongRunningRecognizeMetadata) XXX_Merge(src proto.Message)

func (*LongRunningRecognizeMetadata) XXX_Size

func (m *LongRunningRecognizeMetadata) XXX_Size() int

func (*LongRunningRecognizeMetadata) XXX_Unmarshal

func (m *LongRunningRecognizeMetadata) XXX_Unmarshal(b []byte) error

type LongRunningRecognizeRequest

type LongRunningRecognizeRequest struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Required* The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

The top-level message sent by the client for the `LongRunningRecognize` method.
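A hedged sketch of constructing and sending this request for audio stored in Cloud Storage. The SpeechClient is assumed to be already connected and authenticated, and the gs:// URI, encoding, and sample rate are placeholders.

import (
	"context"

	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

// startLongRunningRecognize kicks off asynchronous recognition of a FLAC file
// in Cloud Storage and returns the long-running Operation to poll.
func startLongRunningRecognize(ctx context.Context, client speechpb.SpeechClient) (*longrunningpb.Operation, error) {
	req := &speechpb.LongRunningRecognizeRequest{
		Config: &speechpb.RecognitionConfig{
			Encoding:        speechpb.RecognitionConfig_FLAC,
			SampleRateHertz: 16000,
			LanguageCode:    "en-US",
		},
		Audio: &speechpb.RecognitionAudio{
			AudioSource: &speechpb.RecognitionAudio_Uri{Uri: "gs://your-bucket/audio.flac"},
		},
	}
	return client.LongRunningRecognize(ctx, req)
}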

func (*LongRunningRecognizeRequest) Descriptor

func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int)

func (*LongRunningRecognizeRequest) GetAudio

func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio

func (*LongRunningRecognizeRequest) GetConfig

func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig

func (*LongRunningRecognizeRequest) ProtoMessage

func (*LongRunningRecognizeRequest) ProtoMessage()

func (*LongRunningRecognizeRequest) Reset

func (m *LongRunningRecognizeRequest) Reset()

func (*LongRunningRecognizeRequest) String

func (m *LongRunningRecognizeRequest) String() string

func (*LongRunningRecognizeRequest) XXX_DiscardUnknown

func (m *LongRunningRecognizeRequest) XXX_DiscardUnknown()

func (*LongRunningRecognizeRequest) XXX_Marshal

func (m *LongRunningRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LongRunningRecognizeRequest) XXX_Merge

func (m *LongRunningRecognizeRequest) XXX_Merge(src proto.Message)

func (*LongRunningRecognizeRequest) XXX_Size

func (m *LongRunningRecognizeRequest) XXX_Size() int

func (*LongRunningRecognizeRequest) XXX_Unmarshal

func (m *LongRunningRecognizeRequest) XXX_Unmarshal(b []byte) error

type LongRunningRecognizeResponse

type LongRunningRecognizeResponse struct {
	// Output only. Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results              []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

The only message returned to the client by the `LongRunningRecognize` method. It contains the result as zero or more sequential `SpeechRecognitionResult` messages. It is included in the `result.response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

func (*LongRunningRecognizeResponse) Descriptor

func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int)

func (*LongRunningRecognizeResponse) GetResults

func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult

func (*LongRunningRecognizeResponse) ProtoMessage

func (*LongRunningRecognizeResponse) ProtoMessage()

func (*LongRunningRecognizeResponse) Reset

func (m *LongRunningRecognizeResponse) Reset()

func (*LongRunningRecognizeResponse) String

func (m *LongRunningRecognizeResponse) String() string

func (*LongRunningRecognizeResponse) XXX_DiscardUnknown

func (m *LongRunningRecognizeResponse) XXX_DiscardUnknown()

func (*LongRunningRecognizeResponse) XXX_Marshal

func (m *LongRunningRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LongRunningRecognizeResponse) XXX_Merge

func (m *LongRunningRecognizeResponse) XXX_Merge(src proto.Message)

func (*LongRunningRecognizeResponse) XXX_Size

func (m *LongRunningRecognizeResponse) XXX_Size() int

func (*LongRunningRecognizeResponse) XXX_Unmarshal

func (m *LongRunningRecognizeResponse) XXX_Unmarshal(b []byte) error

type RecognitionAudio

type RecognitionAudio struct {
	// The audio source, which is either inline content or a Google Cloud
	// Storage uri.
	//
	// Types that are valid to be assigned to AudioSource:
	//	*RecognitionAudio_Content
	//	*RecognitionAudio_Uri
	AudioSource          isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
	XXX_unrecognized     []byte                         `json:"-"`
	XXX_sizecache        int32                          `json:"-"`
}

Contains audio data in the encoding specified in the `RecognitionConfig`. Either `content` or `uri` must be supplied. Supplying both or neither returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See [content limits](/speech-to-text/quotas#content).
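To illustrate the oneof, a small sketch showing both ways of populating the audio source (inline bytes or a Cloud Storage URI; exactly one should be set, per the note above). The helper names are illustrative, not part of this package.

import speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"

// audioFromBytes builds a RecognitionAudio carrying inline audio bytes
// (subject to the content limits linked above).
func audioFromBytes(data []byte) *speechpb.RecognitionAudio {
	return &speechpb.RecognitionAudio{
		AudioSource: &speechpb.RecognitionAudio_Content{Content: data},
	}
}

// audioFromURI builds a RecognitionAudio referencing a Google Cloud Storage URI.
func audioFromURI(uri string) *speechpb.RecognitionAudio {
	return &speechpb.RecognitionAudio{
		AudioSource: &speechpb.RecognitionAudio_Uri{Uri: uri},
	}
}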

func (*RecognitionAudio) Descriptor

func (*RecognitionAudio) Descriptor() ([]byte, []int)

func (*RecognitionAudio) GetAudioSource

func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource

func (*RecognitionAudio) GetContent

func (m *RecognitionAudio) GetContent() []byte

func (*RecognitionAudio) GetUri

func (m *RecognitionAudio) GetUri() string

func (*RecognitionAudio) ProtoMessage

func (*RecognitionAudio) ProtoMessage()

func (*RecognitionAudio) Reset

func (m *RecognitionAudio) Reset()

func (*RecognitionAudio) String

func (m *RecognitionAudio) String() string

func (*RecognitionAudio) XXX_DiscardUnknown

func (m *RecognitionAudio) XXX_DiscardUnknown()

func (*RecognitionAudio) XXX_Marshal

func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RecognitionAudio) XXX_Merge

func (m *RecognitionAudio) XXX_Merge(src proto.Message)

func (*RecognitionAudio) XXX_OneofFuncs

func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*RecognitionAudio) XXX_Size

func (m *RecognitionAudio) XXX_Size() int

func (*RecognitionAudio) XXX_Unmarshal

func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error

type RecognitionAudio_Content

type RecognitionAudio_Content struct {
	Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
}

type RecognitionAudio_Uri

type RecognitionAudio_Uri struct {
	Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
}

type RecognitionConfig

type RecognitionConfig struct {
	// Encoding of audio data sent in all `RecognitionAudio` messages.
	// This field is optional for `FLAC` and `WAV` audio files and required
	// for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
	Encoding RecognitionConfig_AudioEncoding `` /* 130-byte string literal not displayed */
	// Sample rate in Hertz of the audio data sent in all
	// `RecognitionAudio` messages. Valid values are: 8000-48000.
	// 16000 is optimal. For best results, set the sampling rate of the audio
	// source to 16000 Hz. If that's not possible, use the native sample rate of
	// the audio source (instead of re-sampling).
	// This field is optional for `FLAC` and `WAV` audio files and required
	// for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
	// *Required* The language of the supplied audio as a
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
	// Example: "en-US".
	// See [Language Support](/speech-to-text/docs/languages)
	// for a list of the currently supported language codes.
	LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// *Optional* Maximum number of recognition hypotheses to be returned.
	// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
	// within each `SpeechRecognitionResult`.
	// The server may return fewer than `max_alternatives`.
	// Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
	// one. If omitted, will return a maximum of one.
	MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
	// *Optional* If set to `true`, the server will attempt to filter out
	// profanities, replacing all but the initial character in each filtered word
	// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
	// won't be filtered out.
	ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
	// *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
	// A means to provide context to assist the speech recognition. For more
	// information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
	SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
	// *Optional* If `true`, the top result includes a list of words and
	// the start and end time offsets (timestamps) for those words. If
	// `false`, no word-level time offset information is returned. The default is
	// `false`.
	EnableWordTimeOffsets bool `` /* 129-byte string literal not displayed */
	// *Optional* If 'true', adds punctuation to recognition result hypotheses.
	// This feature is only available in select languages. Setting this for
	// requests in other languages has no effect at all.
	// The default 'false' value does not add punctuation to result hypotheses.
	// Note: This is currently offered as an experimental service, complimentary
	// to all users. In the future this may be exclusively available as a
	// premium feature.
	EnableAutomaticPunctuation bool `` /* 143-byte string literal not displayed */
	// *Optional* Which model to select for the given request. Select the model
	// best suited to your domain to get best results. If a model is not
	// explicitly specified, then we auto-select a model based on the parameters
	// in the RecognitionConfig.
	// <table>
	//   <tr>
	//     <td><b>Model</b></td>
	//     <td><b>Description</b></td>
	//   </tr>
	//   <tr>
	//     <td><code>command_and_search</code></td>
	//     <td>Best for short queries such as voice commands or voice search.</td>
	//   </tr>
	//   <tr>
	//     <td><code>phone_call</code></td>
	//     <td>Best for audio that originated from a phone call (typically
	//     recorded at an 8khz sampling rate).</td>
	//   </tr>
	//   <tr>
	//     <td><code>video</code></td>
	//     <td>Best for audio that originated from video or includes multiple
	//         speakers. Ideally the audio is recorded at a 16khz or greater
	//         sampling rate. This is a premium model that costs more than the
	//         standard rate.</td>
	//   </tr>
	//   <tr>
	//     <td><code>default</code></td>
	//     <td>Best for audio that is not one of the specific audio models.
	//         For example, long-form audio. Ideally the audio is high-fidelity,
	//         recorded at a 16khz or greater sampling rate.</td>
	//   </tr>
	// </table>
	Model string `protobuf:"bytes,13,opt,name=model,proto3" json:"model,omitempty"`
	// *Optional* Set to true to use an enhanced model for speech recognition.
	// You must also set the `model` field to a valid, enhanced model. If
	// `use_enhanced` is set to true and the `model` field is not set, then
	// `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced
	// version of the specified model does not exist, then the speech is
	// recognized using the standard version of the specified model.
	//
	// Enhanced speech models require that you opt-in to data logging using
	// instructions in the [documentation](/speech-to-text/enable-data-logging).
	// If you set `use_enhanced` to true and you have not enabled audio logging,
	// then you will receive an error.
	UseEnhanced          bool     `protobuf:"varint,14,opt,name=use_enhanced,json=useEnhanced,proto3" json:"use_enhanced,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Provides information to the recognizer that specifies how to process the request.
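A minimal example configuration, assuming 16 kHz mono LINEAR16 audio in US English; adjust the fields to match your audio. Only field names documented above are used.

import speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"

// exampleConfig returns a basic RecognitionConfig for 16 kHz LINEAR16 audio
// with word time offsets enabled.
func exampleConfig() *speechpb.RecognitionConfig {
	return &speechpb.RecognitionConfig{
		Encoding:              speechpb.RecognitionConfig_LINEAR16,
		SampleRateHertz:       16000,
		LanguageCode:          "en-US",
		MaxAlternatives:       1,
		EnableWordTimeOffsets: true,
	}
}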

func (*RecognitionConfig) Descriptor

func (*RecognitionConfig) Descriptor() ([]byte, []int)

func (*RecognitionConfig) GetEnableAutomaticPunctuation

func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool

func (*RecognitionConfig) GetEnableWordTimeOffsets

func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool

func (*RecognitionConfig) GetEncoding

func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding

func (*RecognitionConfig) GetLanguageCode

func (m *RecognitionConfig) GetLanguageCode() string

func (*RecognitionConfig) GetMaxAlternatives

func (m *RecognitionConfig) GetMaxAlternatives() int32

func (*RecognitionConfig) GetModel

func (m *RecognitionConfig) GetModel() string

func (*RecognitionConfig) GetProfanityFilter

func (m *RecognitionConfig) GetProfanityFilter() bool

func (*RecognitionConfig) GetSampleRateHertz

func (m *RecognitionConfig) GetSampleRateHertz() int32

func (*RecognitionConfig) GetSpeechContexts

func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext

func (*RecognitionConfig) GetUseEnhanced

func (m *RecognitionConfig) GetUseEnhanced() bool

func (*RecognitionConfig) ProtoMessage

func (*RecognitionConfig) ProtoMessage()

func (*RecognitionConfig) Reset

func (m *RecognitionConfig) Reset()

func (*RecognitionConfig) String

func (m *RecognitionConfig) String() string

func (*RecognitionConfig) XXX_DiscardUnknown

func (m *RecognitionConfig) XXX_DiscardUnknown()

func (*RecognitionConfig) XXX_Marshal

func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RecognitionConfig) XXX_Merge

func (m *RecognitionConfig) XXX_Merge(src proto.Message)

func (*RecognitionConfig) XXX_Size

func (m *RecognitionConfig) XXX_Size() int

func (*RecognitionConfig) XXX_Unmarshal

func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error

type RecognitionConfig_AudioEncoding

type RecognitionConfig_AudioEncoding int32

The encoding of the audio data sent in the request.

All encodings support only 1 channel (mono) audio.

For best results, the audio source should be captured and transmitted using a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech recognition can be reduced if lossy codecs are used to capture or transmit audio, particularly if background noise is present. Lossy codecs include `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.

The `FLAC` and `WAV` audio file formats include a header that describes the included audio content. You can request recognition for `WAV` files that contain either `LINEAR16` or `MULAW` encoded audio. If you send `FLAC` or `WAV` audio file format in your request, you do not need to specify an `AudioEncoding`; the audio encoding format is determined from the file header. If you specify an `AudioEncoding` when you send `FLAC` or `WAV` audio, the encoding configuration must match the encoding described in the audio header; otherwise the request returns a [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.

const (
	// Not specified.
	RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1
	// `FLAC` (Free Lossless Audio
	// Codec) is the recommended encoding because it is
	// lossless--therefore recognition is not compromised--and
	// requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
	// encoding supports 16-bit and 24-bit samples, however, not all fields in
	// `STREAMINFO` are supported.
	RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2
	// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
	RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3
	// Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
	RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4
	// Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
	RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5
	// Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)).
	// `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
	RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6
	// Although the use of lossy encodings is not recommended, if a very low
	// bitrate encoding is required, `OGG_OPUS` is highly preferred over
	// Speex encoding. The [Speex](https://speex.org/)  encoding supported by
	// Cloud Speech API has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`.
	// It is a variant of the RTP Speex encoding defined in
	// [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each block
	// starts with a byte containing the length of the block, in bytes, followed
	// by one or more frames of Speex data, padded to an integral number of
	// bytes (octets) as specified in RFC 5574. In other words, each RTP header
	// is replaced with a single byte containing the block length. Only Speex
	// wideband is supported. `sample_rate_hertz` must be 16000.
	RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7
)

func (RecognitionConfig_AudioEncoding) EnumDescriptor

func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int)

func (RecognitionConfig_AudioEncoding) String

func (x RecognitionConfig_AudioEncoding) String() string

type RecognizeRequest

type RecognizeRequest struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Required* The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

The top-level message sent by the client for the `Recognize` method.
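A hedged sketch of a synchronous Recognize call with inline audio. The client is assumed to be connected and authenticated, and reading and encoding the audio bytes is left to the caller; the encoding values shown are placeholders.

import (
	"context"

	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
)

// recognizeSync sends the audio bytes in a single synchronous request and
// returns the response once all audio has been processed.
func recognizeSync(ctx context.Context, client speechpb.SpeechClient, audio []byte) (*speechpb.RecognizeResponse, error) {
	return client.Recognize(ctx, &speechpb.RecognizeRequest{
		Config: &speechpb.RecognitionConfig{
			Encoding:        speechpb.RecognitionConfig_LINEAR16,
			SampleRateHertz: 16000,
			LanguageCode:    "en-US",
		},
		Audio: &speechpb.RecognitionAudio{
			AudioSource: &speechpb.RecognitionAudio_Content{Content: audio},
		},
	})
}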

func (*RecognizeRequest) Descriptor

func (*RecognizeRequest) Descriptor() ([]byte, []int)

func (*RecognizeRequest) GetAudio

func (m *RecognizeRequest) GetAudio() *RecognitionAudio

func (*RecognizeRequest) GetConfig

func (m *RecognizeRequest) GetConfig() *RecognitionConfig

func (*RecognizeRequest) ProtoMessage

func (*RecognizeRequest) ProtoMessage()

func (*RecognizeRequest) Reset

func (m *RecognizeRequest) Reset()

func (*RecognizeRequest) String

func (m *RecognizeRequest) String() string

func (*RecognizeRequest) XXX_DiscardUnknown

func (m *RecognizeRequest) XXX_DiscardUnknown()

func (*RecognizeRequest) XXX_Marshal

func (m *RecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RecognizeRequest) XXX_Merge

func (m *RecognizeRequest) XXX_Merge(src proto.Message)

func (*RecognizeRequest) XXX_Size

func (m *RecognizeRequest) XXX_Size() int

func (*RecognizeRequest) XXX_Unmarshal

func (m *RecognizeRequest) XXX_Unmarshal(b []byte) error

type RecognizeResponse

type RecognizeResponse struct {
	// Output only. Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results              []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

The only message returned to the client by the `Recognize` method. It contains the result as zero or more sequential `SpeechRecognitionResult` messages.
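For illustration, a small sketch of walking the results: each result covers a sequential portion of the audio, and alternatives are ordered best-first.

import (
	"fmt"

	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
)

// printTranscripts prints every alternative of every result with its confidence.
func printTranscripts(resp *speechpb.RecognizeResponse) {
	for _, result := range resp.GetResults() {
		for _, alt := range result.GetAlternatives() {
			fmt.Printf("%.2f  %s\n", alt.GetConfidence(), alt.GetTranscript())
		}
	}
}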

func (*RecognizeResponse) Descriptor

func (*RecognizeResponse) Descriptor() ([]byte, []int)

func (*RecognizeResponse) GetResults

func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult

func (*RecognizeResponse) ProtoMessage

func (*RecognizeResponse) ProtoMessage()

func (*RecognizeResponse) Reset

func (m *RecognizeResponse) Reset()

func (*RecognizeResponse) String

func (m *RecognizeResponse) String() string

func (*RecognizeResponse) XXX_DiscardUnknown

func (m *RecognizeResponse) XXX_DiscardUnknown()

func (*RecognizeResponse) XXX_Marshal

func (m *RecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RecognizeResponse) XXX_Merge

func (m *RecognizeResponse) XXX_Merge(src proto.Message)

func (*RecognizeResponse) XXX_Size

func (m *RecognizeResponse) XXX_Size() int

func (*RecognizeResponse) XXX_Unmarshal

func (m *RecognizeResponse) XXX_Unmarshal(b []byte) error

type SpeechClient

type SpeechClient interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
}

SpeechClient is the client API for Speech service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewSpeechClient

func NewSpeechClient(cc *grpc.ClientConn) SpeechClient
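A hedged sketch of obtaining a SpeechClient from a gRPC connection. Calling Google's production endpoint also requires TLS and OAuth credentials, which are deliberately omitted here; in practice most applications use the higher-level cloud.google.com/go/speech/apiv1 wrapper rather than dialing manually.

import (
	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
	"google.golang.org/grpc"
)

// dialSpeech dials the given target and wraps the connection in the generated
// client stub. Transport security and per-RPC credentials must be supplied by
// the caller via grpc.DialOption.
func dialSpeech(target string, opts ...grpc.DialOption) (speechpb.SpeechClient, *grpc.ClientConn, error) {
	conn, err := grpc.Dial(target, opts...)
	if err != nil {
		return nil, nil, err
	}
	return speechpb.NewSpeechClient(conn), conn, nil
}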

type SpeechContext

type SpeechContext struct {
	// *Optional* A list of strings containing words and phrases "hints" so that
	// the speech recognition is more likely to recognize them. This can be used
	// to improve the accuracy for specific words and phrases, for example, if
	// specific commands are typically spoken by the user. This can also be used
	// to add additional words to the vocabulary of the recognizer. See
	// [usage limits](/speech-to-text/quotas#content).
	Phrases              []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Provides "hints" to the speech recognizer to favor specific words and phrases in the results.

func (*SpeechContext) Descriptor

func (*SpeechContext) Descriptor() ([]byte, []int)

func (*SpeechContext) GetPhrases

func (m *SpeechContext) GetPhrases() []string

func (*SpeechContext) ProtoMessage

func (*SpeechContext) ProtoMessage()

func (*SpeechContext) Reset

func (m *SpeechContext) Reset()

func (*SpeechContext) String

func (m *SpeechContext) String() string

func (*SpeechContext) XXX_DiscardUnknown

func (m *SpeechContext) XXX_DiscardUnknown()

func (*SpeechContext) XXX_Marshal

func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SpeechContext) XXX_Merge

func (m *SpeechContext) XXX_Merge(src proto.Message)

func (*SpeechContext) XXX_Size

func (m *SpeechContext) XXX_Size() int

func (*SpeechContext) XXX_Unmarshal

func (m *SpeechContext) XXX_Unmarshal(b []byte) error

type SpeechRecognitionAlternative

type SpeechRecognitionAlternative struct {
	// Output only. Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// Output only. The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A list of word-specific information for each recognized word.
	Words                []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}

Alternative hypotheses (a.k.a. n-best list).

func (*SpeechRecognitionAlternative) Descriptor

func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)

func (*SpeechRecognitionAlternative) GetConfidence

func (m *SpeechRecognitionAlternative) GetConfidence() float32

func (*SpeechRecognitionAlternative) GetTranscript

func (m *SpeechRecognitionAlternative) GetTranscript() string

func (*SpeechRecognitionAlternative) GetWords

func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo

func (*SpeechRecognitionAlternative) ProtoMessage

func (*SpeechRecognitionAlternative) ProtoMessage()

func (*SpeechRecognitionAlternative) Reset

func (m *SpeechRecognitionAlternative) Reset()

func (*SpeechRecognitionAlternative) String

func (m *SpeechRecognitionAlternative) String() string

func (*SpeechRecognitionAlternative) XXX_DiscardUnknown

func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown()

func (*SpeechRecognitionAlternative) XXX_Marshal

func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SpeechRecognitionAlternative) XXX_Merge

func (m *SpeechRecognitionAlternative) XXX_Merge(src proto.Message)

func (*SpeechRecognitionAlternative) XXX_Size

func (m *SpeechRecognitionAlternative) XXX_Size() int

func (*SpeechRecognitionAlternative) XXX_Unmarshal

func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error

type SpeechRecognitionResult

type SpeechRecognitionResult struct {
	// Output only. May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives         []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
	XXX_unrecognized     []byte                          `json:"-"`
	XXX_sizecache        int32                           `json:"-"`
}

A speech recognition result corresponding to a portion of the audio.

func (*SpeechRecognitionResult) Descriptor

func (*SpeechRecognitionResult) Descriptor() ([]byte, []int)

func (*SpeechRecognitionResult) GetAlternatives

func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative

func (*SpeechRecognitionResult) ProtoMessage

func (*SpeechRecognitionResult) ProtoMessage()

func (*SpeechRecognitionResult) Reset

func (m *SpeechRecognitionResult) Reset()

func (*SpeechRecognitionResult) String

func (m *SpeechRecognitionResult) String() string

func (*SpeechRecognitionResult) XXX_DiscardUnknown

func (m *SpeechRecognitionResult) XXX_DiscardUnknown()

func (*SpeechRecognitionResult) XXX_Marshal

func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SpeechRecognitionResult) XXX_Merge

func (m *SpeechRecognitionResult) XXX_Merge(src proto.Message)

func (*SpeechRecognitionResult) XXX_Size

func (m *SpeechRecognitionResult) XXX_Size() int

func (*SpeechRecognitionResult) XXX_Unmarshal

func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error

type SpeechServer

type SpeechServer interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(Speech_StreamingRecognizeServer) error
}

SpeechServer is the server API for Speech service.
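For illustration, a skeleton implementation that can be registered with RegisterSpeechServer. The method bodies are stubs returning Unimplemented; a real server would perform recognition. The type and function names are placeholders.

import (
	"context"

	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// stubSpeechServer satisfies SpeechServer with placeholder methods.
type stubSpeechServer struct{}

func (stubSpeechServer) Recognize(ctx context.Context, req *speechpb.RecognizeRequest) (*speechpb.RecognizeResponse, error) {
	return nil, status.Error(codes.Unimplemented, "Recognize not implemented")
}

func (stubSpeechServer) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest) (*longrunningpb.Operation, error) {
	return nil, status.Error(codes.Unimplemented, "LongRunningRecognize not implemented")
}

func (stubSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error {
	return status.Error(codes.Unimplemented, "StreamingRecognize not implemented")
}

// newGRPCServer creates a gRPC server and registers the stub Speech service.
func newGRPCServer() *grpc.Server {
	s := grpc.NewServer()
	speechpb.RegisterSpeechServer(s, stubSpeechServer{})
	return s
}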

type Speech_StreamingRecognizeClient

type Speech_StreamingRecognizeClient interface {
	Send(*StreamingRecognizeRequest) error
	Recv() (*StreamingRecognizeResponse, error)
	grpc.ClientStream
}

type Speech_StreamingRecognizeServer

type Speech_StreamingRecognizeServer interface {
	Send(*StreamingRecognizeResponse) error
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}

type StreamingRecognitionConfig

type StreamingRecognitionConfig struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Optional* If `false` or omitted, the recognizer will perform continuous
	// recognition (continuing to wait for and process audio even if the user
	// pauses speaking) until the client closes the input stream (gRPC API) or
	// until the maximum time limit has been reached. May return multiple
	// `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
	//
	// If `true`, the recognizer will detect a single spoken utterance. When it
	// detects that the user has paused or stopped speaking, it will return an
	// `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
	// more than one `StreamingRecognitionResult` with the `is_final` flag set to
	// `true`.
	SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
	// *Optional* If `true`, interim results (tentative hypotheses) may be
	// returned as they become available (these interim results are indicated with
	// the `is_final=false` flag).
	// If `false` or omitted, only `is_final=true` result(s) are returned.
	InterimResults       bool     `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Provides information to the recognizer that specifies how to process the request.
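For example, a streaming configuration requesting interim results on top of a basic RecognitionConfig (values are illustrative):

import speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"

// streamingConfig wraps a RecognitionConfig for streaming use, asking for
// interim hypotheses while keeping continuous (multi-utterance) recognition.
func streamingConfig(cfg *speechpb.RecognitionConfig) *speechpb.StreamingRecognitionConfig {
	return &speechpb.StreamingRecognitionConfig{
		Config:          cfg,
		InterimResults:  true,
		SingleUtterance: false,
	}
}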

func (*StreamingRecognitionConfig) Descriptor

func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int)

func (*StreamingRecognitionConfig) GetConfig

func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig

func (*StreamingRecognitionConfig) GetInterimResults

func (m *StreamingRecognitionConfig) GetInterimResults() bool

func (*StreamingRecognitionConfig) GetSingleUtterance

func (m *StreamingRecognitionConfig) GetSingleUtterance() bool

func (*StreamingRecognitionConfig) ProtoMessage

func (*StreamingRecognitionConfig) ProtoMessage()

func (*StreamingRecognitionConfig) Reset

func (m *StreamingRecognitionConfig) Reset()

func (*StreamingRecognitionConfig) String

func (m *StreamingRecognitionConfig) String() string

func (*StreamingRecognitionConfig) XXX_DiscardUnknown

func (m *StreamingRecognitionConfig) XXX_DiscardUnknown()

func (*StreamingRecognitionConfig) XXX_Marshal

func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*StreamingRecognitionConfig) XXX_Merge

func (m *StreamingRecognitionConfig) XXX_Merge(src proto.Message)

func (*StreamingRecognitionConfig) XXX_Size

func (m *StreamingRecognitionConfig) XXX_Size() int

func (*StreamingRecognitionConfig) XXX_Unmarshal

func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error

type StreamingRecognitionResult

type StreamingRecognitionResult struct {
	// Output only. May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// Output only. If `false`, this `StreamingRecognitionResult` represents an
	// interim result that may change. If `true`, this is the final time the
	// speech service will return this particular `StreamingRecognitionResult`;
	// the recognizer will not return any further hypotheses for this portion of
	// the transcript and corresponding audio.
	IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
	// Output only. An estimate of the likelihood that the recognizer will not
	// change its guess about this interim result. Values range from 0.0
	// (completely unstable) to 1.0 (completely stable).
	// This field is only provided for interim results (`is_final=false`).
	// The default of 0.0 is a sentinel value indicating `stability` was not set.
	Stability            float32  `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A streaming speech recognition result corresponding to a portion of the audio that is currently being processed.

func (*StreamingRecognitionResult) Descriptor

func (*StreamingRecognitionResult) Descriptor() ([]byte, []int)

func (*StreamingRecognitionResult) GetAlternatives

func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative

func (*StreamingRecognitionResult) GetIsFinal

func (m *StreamingRecognitionResult) GetIsFinal() bool

func (*StreamingRecognitionResult) GetStability

func (m *StreamingRecognitionResult) GetStability() float32

func (*StreamingRecognitionResult) ProtoMessage

func (*StreamingRecognitionResult) ProtoMessage()

func (*StreamingRecognitionResult) Reset

func (m *StreamingRecognitionResult) Reset()

func (*StreamingRecognitionResult) String

func (m *StreamingRecognitionResult) String() string

func (*StreamingRecognitionResult) XXX_DiscardUnknown

func (m *StreamingRecognitionResult) XXX_DiscardUnknown()

func (*StreamingRecognitionResult) XXX_Marshal

func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*StreamingRecognitionResult) XXX_Merge

func (m *StreamingRecognitionResult) XXX_Merge(src proto.Message)

func (*StreamingRecognitionResult) XXX_Size

func (m *StreamingRecognitionResult) XXX_Size() int

func (*StreamingRecognitionResult) XXX_Unmarshal

func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error

type StreamingRecognizeRequest

type StreamingRecognizeRequest struct {
	// The streaming request, which is either a streaming config or audio content.
	//
	// Types that are valid to be assigned to StreamingRequest:
	//	*StreamingRecognizeRequest_StreamingConfig
	//	*StreamingRecognizeRequest_AudioContent
	StreamingRequest     isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
	XXX_NoUnkeyedLiteral struct{}                                     `json:"-"`
	XXX_unrecognized     []byte                                       `json:"-"`
	XXX_sizecache        int32                                        `json:"-"`
}

The top-level message sent by the client for the `StreamingRecognize` method. Multiple `StreamingRecognizeRequest` messages are sent. The first message must contain a `streaming_config` message and must not contain `audio` data. All subsequent messages must contain `audio` data and must not contain a `streaming_config` message.
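A hedged sketch of the send side of a streaming call, following the rule above: one initial config-only message, then audio-only messages. The audio source is an io.Reader supplied by the caller, and the buffer size is arbitrary.

import (
	"io"

	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
)

// sendAudio sends the streaming config first, then the audio in chunks, and
// half-closes the stream when the reader is exhausted.
func sendAudio(stream speechpb.Speech_StreamingRecognizeClient, cfg *speechpb.StreamingRecognitionConfig, r io.Reader) error {
	// First message: streaming_config only, no audio.
	if err := stream.Send(&speechpb.StreamingRecognizeRequest{
		StreamingRequest: &speechpb.StreamingRecognizeRequest_StreamingConfig{StreamingConfig: cfg},
	}); err != nil {
		return err
	}
	buf := make([]byte, 32*1024)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			// Subsequent messages: audio_content only.
			if sendErr := stream.Send(&speechpb.StreamingRecognizeRequest{
				StreamingRequest: &speechpb.StreamingRecognizeRequest_AudioContent{AudioContent: buf[:n]},
			}); sendErr != nil {
				return sendErr
			}
		}
		if err == io.EOF {
			return stream.CloseSend()
		}
		if err != nil {
			return err
		}
	}
}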

func (*StreamingRecognizeRequest) Descriptor

func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int)

func (*StreamingRecognizeRequest) GetAudioContent

func (m *StreamingRecognizeRequest) GetAudioContent() []byte

func (*StreamingRecognizeRequest) GetStreamingConfig

func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig

func (*StreamingRecognizeRequest) GetStreamingRequest

func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest

func (*StreamingRecognizeRequest) ProtoMessage

func (*StreamingRecognizeRequest) ProtoMessage()

func (*StreamingRecognizeRequest) Reset

func (m *StreamingRecognizeRequest) Reset()

func (*StreamingRecognizeRequest) String

func (m *StreamingRecognizeRequest) String() string

func (*StreamingRecognizeRequest) XXX_DiscardUnknown

func (m *StreamingRecognizeRequest) XXX_DiscardUnknown()

func (*StreamingRecognizeRequest) XXX_Marshal

func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*StreamingRecognizeRequest) XXX_Merge

func (m *StreamingRecognizeRequest) XXX_Merge(src proto.Message)

func (*StreamingRecognizeRequest) XXX_OneofFuncs

func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*StreamingRecognizeRequest) XXX_Size

func (m *StreamingRecognizeRequest) XXX_Size() int

func (*StreamingRecognizeRequest) XXX_Unmarshal

func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error

type StreamingRecognizeRequest_AudioContent

type StreamingRecognizeRequest_AudioContent struct {
	AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
}

type StreamingRecognizeRequest_StreamingConfig

type StreamingRecognizeRequest_StreamingConfig struct {
	StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}

type StreamingRecognizeResponse

type StreamingRecognizeResponse struct {
	// Output only. If set, returns a [google.rpc.Status][google.rpc.Status] message that
	// specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. This repeated list contains zero or more results that
	// correspond to consecutive portions of the audio currently being processed.
	// It contains zero or one `is_final=true` result (the newly settled portion),
	// followed by zero or more `is_final=false` results (the interim results).
	Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Output only. Indicates the type of speech event.
	SpeechEventType      StreamingRecognizeResponse_SpeechEventType `` /* 180-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
	XXX_unrecognized     []byte                                     `json:"-"`
	XXX_sizecache        int32                                      `json:"-"`
}

`StreamingRecognizeResponse` is the only message returned to the client by `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse` messages are streamed back to the client. If there is no recognizable audio, and `single_utterance` is set to false, then no messages are streamed back to the client.

Here's an example of a series of `StreamingRecognizeResponse`s that might be returned while processing audio:

1. results { alternatives { transcript: "tube" } stability: 0.01 }

2. results { alternatives { transcript: "to be a" } stability: 0.01 }

3. results { alternatives { transcript: "to be" } stability: 0.9 }
   results { alternatives { transcript: " or not to be" } stability: 0.01 }

4. results { alternatives { transcript: "to be or not to be" confidence: 0.92 }
   alternatives { transcript: "to bee or not to bee" } is_final: true }

5. results { alternatives { transcript: " that's" } stability: 0.01 }

6. results { alternatives { transcript: " that is" } stability: 0.9 }
   results { alternatives { transcript: " the question" } stability: 0.01 }

7. results { alternatives { transcript: " that is the question" confidence: 0.98 }
   alternatives { transcript: " that was the question" } is_final: true }

Notes:

  • Only two of the above responses, #4 and #7, contain final results; they are indicated by `is_final: true`. Concatenating these together generates the full transcript: "to be or not to be that is the question".
  • The others contain interim `results`. #3 and #6 contain two interim `results`: the first portion has a high stability and is less likely to change; the second portion has a low stability and is very likely to change. A UI designer might choose to show only high stability `results`.
  • The specific `stability` and `confidence` values shown above are only for illustrative purposes. Actual values may vary.
  • In each response, only one of these fields will be set: `error`, `speech_event_type`, or one or more (repeated) `results`.
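Complementing the send-side sketch shown earlier, a hedged sketch of the receive loop: it checks the error field, then prints each alternative, flagging final results per the notes above. The function name is illustrative.

import (
	"fmt"
	"io"

	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
)

// receiveResults reads responses until the server closes the stream, printing
// each alternative and whether its result is final.
func receiveResults(stream speechpb.Speech_StreamingRecognizeClient) error {
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // server finished streaming responses
		}
		if err != nil {
			return err
		}
		if s := resp.GetError(); s != nil {
			return fmt.Errorf("recognition error: code=%d message=%s", s.GetCode(), s.GetMessage())
		}
		for _, result := range resp.GetResults() {
			for _, alt := range result.GetAlternatives() {
				fmt.Printf("final=%v stability=%.2f  %s\n", result.GetIsFinal(), result.GetStability(), alt.GetTranscript())
			}
		}
	}
}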

func (*StreamingRecognizeResponse) Descriptor

func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int)

func (*StreamingRecognizeResponse) GetError

func (m *StreamingRecognizeResponse) GetError() *status.Status

func (*StreamingRecognizeResponse) GetResults

func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult

func (*StreamingRecognizeResponse) GetSpeechEventType

func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType

func (*StreamingRecognizeResponse) ProtoMessage

func (*StreamingRecognizeResponse) ProtoMessage()

func (*StreamingRecognizeResponse) Reset

func (m *StreamingRecognizeResponse) Reset()

func (*StreamingRecognizeResponse) String

func (m *StreamingRecognizeResponse) String() string

func (*StreamingRecognizeResponse) XXX_DiscardUnknown

func (m *StreamingRecognizeResponse) XXX_DiscardUnknown()

func (*StreamingRecognizeResponse) XXX_Marshal

func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*StreamingRecognizeResponse) XXX_Merge

func (m *StreamingRecognizeResponse) XXX_Merge(src proto.Message)

func (*StreamingRecognizeResponse) XXX_Size

func (m *StreamingRecognizeResponse) XXX_Size() int

func (*StreamingRecognizeResponse) XXX_Unmarshal

func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error

type StreamingRecognizeResponse_SpeechEventType

type StreamingRecognizeResponse_SpeechEventType int32

Indicates the type of speech event.

const (
	// No speech event specified.
	StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0
	// This event indicates that the server has detected the end of the user's
	// speech utterance and expects no additional speech. Therefore, the server
	// will not process additional audio (although it may subsequently return
	// additional results). The client should stop sending additional audio
	// data, half-close the gRPC connection, and wait for any additional results
	// until the server closes the gRPC connection. This event is only sent if
	// `single_utterance` was set to `true`, and is not used otherwise.
	StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1
)

func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor

func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int)

func (StreamingRecognizeResponse_SpeechEventType) String

func (x StreamingRecognizeResponse_SpeechEventType) String() string

type WordInfo

type WordInfo struct {
	// Output only. Time offset relative to the beginning of the audio,
	// and corresponding to the start of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Output only. Time offset relative to the beginning of the audio,
	// and corresponding to the end of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// Output only. The word corresponding to this set of information.
	Word                 string   `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Word-specific information for recognized words.
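A small sketch of reading word time offsets when enable_word_time_offsets was set. The conversion helper is ptypes.Duration from github.com/golang/protobuf/ptypes; errors are ignored for brevity.

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1"
)

// printWordTimings prints each word of an alternative with its start and end offsets.
func printWordTimings(alt *speechpb.SpeechRecognitionAlternative) {
	for _, w := range alt.GetWords() {
		start, _ := ptypes.Duration(w.GetStartTime())
		end, _ := ptypes.Duration(w.GetEndTime())
		fmt.Printf("%s: %v - %v\n", w.GetWord(), start, end)
	}
}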

func (*WordInfo) Descriptor

func (*WordInfo) Descriptor() ([]byte, []int)

func (*WordInfo) GetEndTime

func (m *WordInfo) GetEndTime() *duration.Duration

func (*WordInfo) GetStartTime

func (m *WordInfo) GetStartTime() *duration.Duration

func (*WordInfo) GetWord

func (m *WordInfo) GetWord() string

func (*WordInfo) ProtoMessage

func (*WordInfo) ProtoMessage()

func (*WordInfo) Reset

func (m *WordInfo) Reset()

func (*WordInfo) String

func (m *WordInfo) String() string

func (*WordInfo) XXX_DiscardUnknown

func (m *WordInfo) XXX_DiscardUnknown()

func (*WordInfo) XXX_Marshal

func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WordInfo) XXX_Merge

func (m *WordInfo) XXX_Merge(src proto.Message)

func (*WordInfo) XXX_Size

func (m *WordInfo) XXX_Size() int

func (*WordInfo) XXX_Unmarshal

func (m *WordInfo) XXX_Unmarshal(b []byte) error
