Documentation
¶
Index ¶
- Constants
- Variables
- func RegisterVideoIntelligenceServiceServer(s grpc.ServiceRegistrar, srv VideoIntelligenceServiceServer)
- type AnnotateVideoProgress
- func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)deprecated
- func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress
- func (*AnnotateVideoProgress) ProtoMessage()
- func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Message
- func (x *AnnotateVideoProgress) Reset()
- func (x *AnnotateVideoProgress) String() string
- type AnnotateVideoRequest
- func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)deprecated
- func (x *AnnotateVideoRequest) GetFeatures() []Feature
- func (x *AnnotateVideoRequest) GetInputContent() []byte
- func (x *AnnotateVideoRequest) GetInputUri() string
- func (x *AnnotateVideoRequest) GetLocationId() string
- func (x *AnnotateVideoRequest) GetOutputUri() string
- func (x *AnnotateVideoRequest) GetVideoContext() *VideoContext
- func (*AnnotateVideoRequest) ProtoMessage()
- func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Message
- func (x *AnnotateVideoRequest) Reset()
- func (x *AnnotateVideoRequest) String() string
- type AnnotateVideoResponse
- func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)deprecated
- func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults
- func (*AnnotateVideoResponse) ProtoMessage()
- func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Message
- func (x *AnnotateVideoResponse) Reset()
- func (x *AnnotateVideoResponse) String() string
- type DetectedAttribute
- func (*DetectedAttribute) Descriptor() ([]byte, []int)deprecated
- func (x *DetectedAttribute) GetConfidence() float32
- func (x *DetectedAttribute) GetName() string
- func (x *DetectedAttribute) GetValue() string
- func (*DetectedAttribute) ProtoMessage()
- func (x *DetectedAttribute) ProtoReflect() protoreflect.Message
- func (x *DetectedAttribute) Reset()
- func (x *DetectedAttribute) String() string
- type DetectedLandmark
- func (*DetectedLandmark) Descriptor() ([]byte, []int)deprecated
- func (x *DetectedLandmark) GetConfidence() float32
- func (x *DetectedLandmark) GetName() string
- func (x *DetectedLandmark) GetPoint() *NormalizedVertex
- func (*DetectedLandmark) ProtoMessage()
- func (x *DetectedLandmark) ProtoReflect() protoreflect.Message
- func (x *DetectedLandmark) Reset()
- func (x *DetectedLandmark) String() string
- type Entity
- func (*Entity) Descriptor() ([]byte, []int)deprecated
- func (x *Entity) GetDescription() string
- func (x *Entity) GetEntityId() string
- func (x *Entity) GetLanguageCode() string
- func (*Entity) ProtoMessage()
- func (x *Entity) ProtoReflect() protoreflect.Message
- func (x *Entity) Reset()
- func (x *Entity) String() string
- type ExplicitContentAnnotation
- func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)deprecated
- func (x *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame
- func (x *ExplicitContentAnnotation) GetVersion() string
- func (*ExplicitContentAnnotation) ProtoMessage()
- func (x *ExplicitContentAnnotation) ProtoReflect() protoreflect.Message
- func (x *ExplicitContentAnnotation) Reset()
- func (x *ExplicitContentAnnotation) String() string
- type ExplicitContentDetectionConfig
- func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)deprecated
- func (x *ExplicitContentDetectionConfig) GetModel() string
- func (*ExplicitContentDetectionConfig) ProtoMessage()
- func (x *ExplicitContentDetectionConfig) ProtoReflect() protoreflect.Message
- func (x *ExplicitContentDetectionConfig) Reset()
- func (x *ExplicitContentDetectionConfig) String() string
- type ExplicitContentFrame
- func (*ExplicitContentFrame) Descriptor() ([]byte, []int)deprecated
- func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihood
- func (x *ExplicitContentFrame) GetTimeOffset() *durationpb.Duration
- func (*ExplicitContentFrame) ProtoMessage()
- func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Message
- func (x *ExplicitContentFrame) Reset()
- func (x *ExplicitContentFrame) String() string
- type FaceAnnotation (deprecated)
- func (*FaceAnnotation) Descriptor() ([]byte, []int)deprecated
- func (x *FaceAnnotation) GetFrames() []*FaceFrame
- func (x *FaceAnnotation) GetSegments() []*FaceSegment
- func (x *FaceAnnotation) GetThumbnail() []byte
- func (*FaceAnnotation) ProtoMessage()
- func (x *FaceAnnotation) ProtoReflect() protoreflect.Message
- func (x *FaceAnnotation) Reset()
- func (x *FaceAnnotation) String() string
- type FaceDetectionAnnotation
- func (*FaceDetectionAnnotation) Descriptor() ([]byte, []int)deprecated
- func (x *FaceDetectionAnnotation) GetThumbnail() []byte
- func (x *FaceDetectionAnnotation) GetTracks() []*Track
- func (x *FaceDetectionAnnotation) GetVersion() string
- func (*FaceDetectionAnnotation) ProtoMessage()
- func (x *FaceDetectionAnnotation) ProtoReflect() protoreflect.Message
- func (x *FaceDetectionAnnotation) Reset()
- func (x *FaceDetectionAnnotation) String() string
- type FaceDetectionConfig
- func (*FaceDetectionConfig) Descriptor() ([]byte, []int)deprecated
- func (x *FaceDetectionConfig) GetIncludeAttributes() bool
- func (x *FaceDetectionConfig) GetIncludeBoundingBoxes() bool
- func (x *FaceDetectionConfig) GetModel() string
- func (*FaceDetectionConfig) ProtoMessage()
- func (x *FaceDetectionConfig) ProtoReflect() protoreflect.Message
- func (x *FaceDetectionConfig) Reset()
- func (x *FaceDetectionConfig) String() string
- type FaceFrame (deprecated)
- func (*FaceFrame) Descriptor() ([]byte, []int)deprecated
- func (x *FaceFrame) GetNormalizedBoundingBoxes() []*NormalizedBoundingBox
- func (x *FaceFrame) GetTimeOffset() *durationpb.Duration
- func (*FaceFrame) ProtoMessage()
- func (x *FaceFrame) ProtoReflect() protoreflect.Message
- func (x *FaceFrame) Reset()
- func (x *FaceFrame) String() string
- type FaceSegment
- type Feature
- type LabelAnnotation
- func (*LabelAnnotation) Descriptor() ([]byte, []int)deprecated
- func (x *LabelAnnotation) GetCategoryEntities() []*Entity
- func (x *LabelAnnotation) GetEntity() *Entity
- func (x *LabelAnnotation) GetFrames() []*LabelFrame
- func (x *LabelAnnotation) GetSegments() []*LabelSegment
- func (x *LabelAnnotation) GetVersion() string
- func (*LabelAnnotation) ProtoMessage()
- func (x *LabelAnnotation) ProtoReflect() protoreflect.Message
- func (x *LabelAnnotation) Reset()
- func (x *LabelAnnotation) String() string
- type LabelDetectionConfig
- func (*LabelDetectionConfig) Descriptor() ([]byte, []int)deprecated
- func (x *LabelDetectionConfig) GetFrameConfidenceThreshold() float32
- func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode
- func (x *LabelDetectionConfig) GetModel() string
- func (x *LabelDetectionConfig) GetStationaryCamera() bool
- func (x *LabelDetectionConfig) GetVideoConfidenceThreshold() float32
- func (*LabelDetectionConfig) ProtoMessage()
- func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Message
- func (x *LabelDetectionConfig) Reset()
- func (x *LabelDetectionConfig) String() string
- type LabelDetectionMode
- func (LabelDetectionMode) Descriptor() protoreflect.EnumDescriptor
- func (x LabelDetectionMode) Enum() *LabelDetectionMode
- func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)deprecated
- func (x LabelDetectionMode) Number() protoreflect.EnumNumber
- func (x LabelDetectionMode) String() string
- func (LabelDetectionMode) Type() protoreflect.EnumType
- type LabelFrame
- func (*LabelFrame) Descriptor() ([]byte, []int)deprecated
- func (x *LabelFrame) GetConfidence() float32
- func (x *LabelFrame) GetTimeOffset() *durationpb.Duration
- func (*LabelFrame) ProtoMessage()
- func (x *LabelFrame) ProtoReflect() protoreflect.Message
- func (x *LabelFrame) Reset()
- func (x *LabelFrame) String() string
- type LabelSegment
- func (*LabelSegment) Descriptor() ([]byte, []int)deprecated
- func (x *LabelSegment) GetConfidence() float32
- func (x *LabelSegment) GetSegment() *VideoSegment
- func (*LabelSegment) ProtoMessage()
- func (x *LabelSegment) ProtoReflect() protoreflect.Message
- func (x *LabelSegment) Reset()
- func (x *LabelSegment) String() string
- type Likelihood
- type LogoRecognitionAnnotation
- func (*LogoRecognitionAnnotation) Descriptor() ([]byte, []int)deprecated
- func (x *LogoRecognitionAnnotation) GetEntity() *Entity
- func (x *LogoRecognitionAnnotation) GetSegments() []*VideoSegment
- func (x *LogoRecognitionAnnotation) GetTracks() []*Track
- func (*LogoRecognitionAnnotation) ProtoMessage()
- func (x *LogoRecognitionAnnotation) ProtoReflect() protoreflect.Message
- func (x *LogoRecognitionAnnotation) Reset()
- func (x *LogoRecognitionAnnotation) String() string
- type NormalizedBoundingBox
- func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)deprecated
- func (x *NormalizedBoundingBox) GetBottom() float32
- func (x *NormalizedBoundingBox) GetLeft() float32
- func (x *NormalizedBoundingBox) GetRight() float32
- func (x *NormalizedBoundingBox) GetTop() float32
- func (*NormalizedBoundingBox) ProtoMessage()
- func (x *NormalizedBoundingBox) ProtoReflect() protoreflect.Message
- func (x *NormalizedBoundingBox) Reset()
- func (x *NormalizedBoundingBox) String() string
- type NormalizedBoundingPoly
- func (*NormalizedBoundingPoly) Descriptor() ([]byte, []int)deprecated
- func (x *NormalizedBoundingPoly) GetVertices() []*NormalizedVertex
- func (*NormalizedBoundingPoly) ProtoMessage()
- func (x *NormalizedBoundingPoly) ProtoReflect() protoreflect.Message
- func (x *NormalizedBoundingPoly) Reset()
- func (x *NormalizedBoundingPoly) String() string
- type NormalizedVertex
- func (*NormalizedVertex) Descriptor() ([]byte, []int)deprecated
- func (x *NormalizedVertex) GetX() float32
- func (x *NormalizedVertex) GetY() float32
- func (*NormalizedVertex) ProtoMessage()
- func (x *NormalizedVertex) ProtoReflect() protoreflect.Message
- func (x *NormalizedVertex) Reset()
- func (x *NormalizedVertex) String() string
- type ObjectTrackingAnnotation
- func (*ObjectTrackingAnnotation) Descriptor() ([]byte, []int)deprecated
- func (x *ObjectTrackingAnnotation) GetConfidence() float32
- func (x *ObjectTrackingAnnotation) GetEntity() *Entity
- func (x *ObjectTrackingAnnotation) GetFrames() []*ObjectTrackingFrame
- func (x *ObjectTrackingAnnotation) GetSegment() *VideoSegment
- func (x *ObjectTrackingAnnotation) GetTrackId() int64
- func (x *ObjectTrackingAnnotation) GetTrackInfo() isObjectTrackingAnnotation_TrackInfo
- func (x *ObjectTrackingAnnotation) GetVersion() string
- func (*ObjectTrackingAnnotation) ProtoMessage()
- func (x *ObjectTrackingAnnotation) ProtoReflect() protoreflect.Message
- func (x *ObjectTrackingAnnotation) Reset()
- func (x *ObjectTrackingAnnotation) String() string
- type ObjectTrackingAnnotation_Segment
- type ObjectTrackingAnnotation_TrackId
- type ObjectTrackingConfig
- func (*ObjectTrackingConfig) Descriptor() ([]byte, []int)deprecated
- func (x *ObjectTrackingConfig) GetModel() string
- func (*ObjectTrackingConfig) ProtoMessage()
- func (x *ObjectTrackingConfig) ProtoReflect() protoreflect.Message
- func (x *ObjectTrackingConfig) Reset()
- func (x *ObjectTrackingConfig) String() string
- type ObjectTrackingFrame
- func (*ObjectTrackingFrame) Descriptor() ([]byte, []int)deprecated
- func (x *ObjectTrackingFrame) GetNormalizedBoundingBox() *NormalizedBoundingBox
- func (x *ObjectTrackingFrame) GetTimeOffset() *durationpb.Duration
- func (*ObjectTrackingFrame) ProtoMessage()
- func (x *ObjectTrackingFrame) ProtoReflect() protoreflect.Message
- func (x *ObjectTrackingFrame) Reset()
- func (x *ObjectTrackingFrame) String() string
- type PersonDetectionAnnotation
- func (*PersonDetectionAnnotation) Descriptor() ([]byte, []int)deprecated
- func (x *PersonDetectionAnnotation) GetTracks() []*Track
- func (x *PersonDetectionAnnotation) GetVersion() string
- func (*PersonDetectionAnnotation) ProtoMessage()
- func (x *PersonDetectionAnnotation) ProtoReflect() protoreflect.Message
- func (x *PersonDetectionAnnotation) Reset()
- func (x *PersonDetectionAnnotation) String() string
- type PersonDetectionConfig
- func (*PersonDetectionConfig) Descriptor() ([]byte, []int)deprecated
- func (x *PersonDetectionConfig) GetIncludeAttributes() bool
- func (x *PersonDetectionConfig) GetIncludeBoundingBoxes() bool
- func (x *PersonDetectionConfig) GetIncludePoseLandmarks() bool
- func (*PersonDetectionConfig) ProtoMessage()
- func (x *PersonDetectionConfig) ProtoReflect() protoreflect.Message
- func (x *PersonDetectionConfig) Reset()
- func (x *PersonDetectionConfig) String() string
- type ShotChangeDetectionConfig
- func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)deprecated
- func (x *ShotChangeDetectionConfig) GetModel() string
- func (*ShotChangeDetectionConfig) ProtoMessage()
- func (x *ShotChangeDetectionConfig) ProtoReflect() protoreflect.Message
- func (x *ShotChangeDetectionConfig) Reset()
- func (x *ShotChangeDetectionConfig) String() string
- type SpeechContext
- type SpeechRecognitionAlternative
- func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)deprecated
- func (x *SpeechRecognitionAlternative) GetConfidence() float32
- func (x *SpeechRecognitionAlternative) GetTranscript() string
- func (x *SpeechRecognitionAlternative) GetWords() []*WordInfo
- func (*SpeechRecognitionAlternative) ProtoMessage()
- func (x *SpeechRecognitionAlternative) ProtoReflect() protoreflect.Message
- func (x *SpeechRecognitionAlternative) Reset()
- func (x *SpeechRecognitionAlternative) String() string
- type SpeechTranscription
- func (*SpeechTranscription) Descriptor() ([]byte, []int)deprecated
- func (x *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative
- func (x *SpeechTranscription) GetLanguageCode() string
- func (*SpeechTranscription) ProtoMessage()
- func (x *SpeechTranscription) ProtoReflect() protoreflect.Message
- func (x *SpeechTranscription) Reset()
- func (x *SpeechTranscription) String() string
- type SpeechTranscriptionConfig
- func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int)deprecated
- func (x *SpeechTranscriptionConfig) GetAudioTracks() []int32
- func (x *SpeechTranscriptionConfig) GetDiarizationSpeakerCount() int32
- func (x *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() bool
- func (x *SpeechTranscriptionConfig) GetEnableSpeakerDiarization() bool
- func (x *SpeechTranscriptionConfig) GetEnableWordConfidence() bool
- func (x *SpeechTranscriptionConfig) GetFilterProfanity() bool
- func (x *SpeechTranscriptionConfig) GetLanguageCode() string
- func (x *SpeechTranscriptionConfig) GetMaxAlternatives() int32
- func (x *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext
- func (*SpeechTranscriptionConfig) ProtoMessage()
- func (x *SpeechTranscriptionConfig) ProtoReflect() protoreflect.Message
- func (x *SpeechTranscriptionConfig) Reset()
- func (x *SpeechTranscriptionConfig) String() string
- type TextAnnotation
- func (*TextAnnotation) Descriptor() ([]byte, []int)deprecated
- func (x *TextAnnotation) GetSegments() []*TextSegment
- func (x *TextAnnotation) GetText() string
- func (x *TextAnnotation) GetVersion() string
- func (*TextAnnotation) ProtoMessage()
- func (x *TextAnnotation) ProtoReflect() protoreflect.Message
- func (x *TextAnnotation) Reset()
- func (x *TextAnnotation) String() string
- type TextDetectionConfig
- func (*TextDetectionConfig) Descriptor() ([]byte, []int)deprecated
- func (x *TextDetectionConfig) GetLanguageHints() []string
- func (x *TextDetectionConfig) GetModel() string
- func (*TextDetectionConfig) ProtoMessage()
- func (x *TextDetectionConfig) ProtoReflect() protoreflect.Message
- func (x *TextDetectionConfig) Reset()
- func (x *TextDetectionConfig) String() string
- type TextFrame
- func (*TextFrame) Descriptor() ([]byte, []int)deprecated
- func (x *TextFrame) GetRotatedBoundingBox() *NormalizedBoundingPoly
- func (x *TextFrame) GetTimeOffset() *durationpb.Duration
- func (*TextFrame) ProtoMessage()
- func (x *TextFrame) ProtoReflect() protoreflect.Message
- func (x *TextFrame) Reset()
- func (x *TextFrame) String() string
- type TextSegment
- func (*TextSegment) Descriptor() ([]byte, []int)deprecated
- func (x *TextSegment) GetConfidence() float32
- func (x *TextSegment) GetFrames() []*TextFrame
- func (x *TextSegment) GetSegment() *VideoSegment
- func (*TextSegment) ProtoMessage()
- func (x *TextSegment) ProtoReflect() protoreflect.Message
- func (x *TextSegment) Reset()
- func (x *TextSegment) String() string
- type TimestampedObject
- func (*TimestampedObject) Descriptor() ([]byte, []int)deprecated
- func (x *TimestampedObject) GetAttributes() []*DetectedAttribute
- func (x *TimestampedObject) GetLandmarks() []*DetectedLandmark
- func (x *TimestampedObject) GetNormalizedBoundingBox() *NormalizedBoundingBox
- func (x *TimestampedObject) GetTimeOffset() *durationpb.Duration
- func (*TimestampedObject) ProtoMessage()
- func (x *TimestampedObject) ProtoReflect() protoreflect.Message
- func (x *TimestampedObject) Reset()
- func (x *TimestampedObject) String() string
- type Track
- func (*Track) Descriptor() ([]byte, []int)deprecated
- func (x *Track) GetAttributes() []*DetectedAttribute
- func (x *Track) GetConfidence() float32
- func (x *Track) GetSegment() *VideoSegment
- func (x *Track) GetTimestampedObjects() []*TimestampedObject
- func (*Track) ProtoMessage()
- func (x *Track) ProtoReflect() protoreflect.Message
- func (x *Track) Reset()
- func (x *Track) String() string
- type UnimplementedVideoIntelligenceServiceServer
- type UnsafeVideoIntelligenceServiceServer
- type VideoAnnotationProgress
- func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)deprecated
- func (x *VideoAnnotationProgress) GetFeature() Feature
- func (x *VideoAnnotationProgress) GetInputUri() string
- func (x *VideoAnnotationProgress) GetProgressPercent() int32
- func (x *VideoAnnotationProgress) GetSegment() *VideoSegment
- func (x *VideoAnnotationProgress) GetStartTime() *timestamppb.Timestamp
- func (x *VideoAnnotationProgress) GetUpdateTime() *timestamppb.Timestamp
- func (*VideoAnnotationProgress) ProtoMessage()
- func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Message
- func (x *VideoAnnotationProgress) Reset()
- func (x *VideoAnnotationProgress) String() string
- type VideoAnnotationResults
- func (*VideoAnnotationResults) Descriptor() ([]byte, []int)deprecated
- func (x *VideoAnnotationResults) GetError() *status.Status
- func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation
- func (x *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation (deprecated)
- func (x *VideoAnnotationResults) GetFaceDetectionAnnotations() []*FaceDetectionAnnotation
- func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation
- func (x *VideoAnnotationResults) GetInputUri() string
- func (x *VideoAnnotationResults) GetLogoRecognitionAnnotations() []*LogoRecognitionAnnotation
- func (x *VideoAnnotationResults) GetObjectAnnotations() []*ObjectTrackingAnnotation
- func (x *VideoAnnotationResults) GetPersonDetectionAnnotations() []*PersonDetectionAnnotation
- func (x *VideoAnnotationResults) GetSegment() *VideoSegment
- func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation
- func (x *VideoAnnotationResults) GetSegmentPresenceLabelAnnotations() []*LabelAnnotation
- func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment
- func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation
- func (x *VideoAnnotationResults) GetShotPresenceLabelAnnotations() []*LabelAnnotation
- func (x *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription
- func (x *VideoAnnotationResults) GetTextAnnotations() []*TextAnnotation
- func (*VideoAnnotationResults) ProtoMessage()
- func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message
- func (x *VideoAnnotationResults) Reset()
- func (x *VideoAnnotationResults) String() string
- type VideoContext
- func (*VideoContext) Descriptor() ([]byte, []int)deprecated
- func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig
- func (x *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig
- func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig
- func (x *VideoContext) GetObjectTrackingConfig() *ObjectTrackingConfig
- func (x *VideoContext) GetPersonDetectionConfig() *PersonDetectionConfig
- func (x *VideoContext) GetSegments() []*VideoSegment
- func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig
- func (x *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig
- func (x *VideoContext) GetTextDetectionConfig() *TextDetectionConfig
- func (*VideoContext) ProtoMessage()
- func (x *VideoContext) ProtoReflect() protoreflect.Message
- func (x *VideoContext) Reset()
- func (x *VideoContext) String() string
- type VideoIntelligenceServiceClient
- type VideoIntelligenceServiceServer
- type VideoSegment
- func (*VideoSegment) Descriptor() ([]byte, []int)deprecated
- func (x *VideoSegment) GetEndTimeOffset() *durationpb.Duration
- func (x *VideoSegment) GetStartTimeOffset() *durationpb.Duration
- func (*VideoSegment) ProtoMessage()
- func (x *VideoSegment) ProtoReflect() protoreflect.Message
- func (x *VideoSegment) Reset()
- func (x *VideoSegment) String() string
- type WordInfo
- func (*WordInfo) Descriptor() ([]byte, []int)deprecated
- func (x *WordInfo) GetConfidence() float32
- func (x *WordInfo) GetEndTime() *durationpb.Duration
- func (x *WordInfo) GetSpeakerTag() int32
- func (x *WordInfo) GetStartTime() *durationpb.Duration
- func (x *WordInfo) GetWord() string
- func (*WordInfo) ProtoMessage()
- func (x *WordInfo) ProtoReflect() protoreflect.Message
- func (x *WordInfo) Reset()
- func (x *WordInfo) String() string
Constants ¶
const (
VideoIntelligenceService_AnnotateVideo_FullMethodName = "/google.cloud.videointelligence.v1.VideoIntelligenceService/AnnotateVideo"
)
Variables ¶
var ( Feature_name = map[int32]string{ 0: "FEATURE_UNSPECIFIED", 1: "LABEL_DETECTION", 2: "SHOT_CHANGE_DETECTION", 3: "EXPLICIT_CONTENT_DETECTION", 4: "FACE_DETECTION", 6: "SPEECH_TRANSCRIPTION", 7: "TEXT_DETECTION", 9: "OBJECT_TRACKING", 12: "LOGO_RECOGNITION", 14: "PERSON_DETECTION", } Feature_value = map[string]int32{ "FEATURE_UNSPECIFIED": 0, "LABEL_DETECTION": 1, "SHOT_CHANGE_DETECTION": 2, "EXPLICIT_CONTENT_DETECTION": 3, "FACE_DETECTION": 4, "SPEECH_TRANSCRIPTION": 6, "TEXT_DETECTION": 7, "OBJECT_TRACKING": 9, "LOGO_RECOGNITION": 12, "PERSON_DETECTION": 14, } )
Enum value maps for Feature.
var ( LabelDetectionMode_name = map[int32]string{ 0: "LABEL_DETECTION_MODE_UNSPECIFIED", 1: "SHOT_MODE", 2: "FRAME_MODE", 3: "SHOT_AND_FRAME_MODE", } LabelDetectionMode_value = map[string]int32{ "LABEL_DETECTION_MODE_UNSPECIFIED": 0, "SHOT_MODE": 1, "FRAME_MODE": 2, "SHOT_AND_FRAME_MODE": 3, } )
Enum value maps for LabelDetectionMode.
var ( Likelihood_name = map[int32]string{ 0: "LIKELIHOOD_UNSPECIFIED", 1: "VERY_UNLIKELY", 2: "UNLIKELY", 3: "POSSIBLE", 4: "LIKELY", 5: "VERY_LIKELY", } Likelihood_value = map[string]int32{ "LIKELIHOOD_UNSPECIFIED": 0, "VERY_UNLIKELY": 1, "UNLIKELY": 2, "POSSIBLE": 3, "LIKELY": 4, "VERY_LIKELY": 5, } )
Enum value maps for Likelihood.
var File_google_cloud_videointelligence_v1_video_intelligence_proto protoreflect.FileDescriptor
var VideoIntelligenceService_ServiceDesc = grpc.ServiceDesc{ ServiceName: "google.cloud.videointelligence.v1.VideoIntelligenceService", HandlerType: (*VideoIntelligenceServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "AnnotateVideo", Handler: _VideoIntelligenceService_AnnotateVideo_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "google/cloud/videointelligence/v1/video_intelligence.proto", }
VideoIntelligenceService_ServiceDesc is the grpc.ServiceDesc for VideoIntelligenceService service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).
Functions ¶
func RegisterVideoIntelligenceServiceServer ¶
func RegisterVideoIntelligenceServiceServer(s grpc.ServiceRegistrar, srv VideoIntelligenceServiceServer)
Types ¶
type AnnotateVideoProgress ¶
type AnnotateVideoProgress struct {
// Progress metadata for all videos specified in `AnnotateVideoRequest`.
AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress,proto3" json:"annotation_progress,omitempty"`
// contains filtered or unexported fields
}
Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
func (*AnnotateVideoProgress) Descriptor
deprecated
func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)
Deprecated: Use AnnotateVideoProgress.ProtoReflect.Descriptor instead.
func (*AnnotateVideoProgress) GetAnnotationProgress ¶
func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress
func (*AnnotateVideoProgress) ProtoMessage ¶
func (*AnnotateVideoProgress) ProtoMessage()
func (*AnnotateVideoProgress) ProtoReflect ¶
func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Message
func (*AnnotateVideoProgress) Reset ¶
func (x *AnnotateVideoProgress) Reset()
func (*AnnotateVideoProgress) String ¶
func (x *AnnotateVideoProgress) String() string
type AnnotateVideoRequest ¶
type AnnotateVideoRequest struct {
// Input video location. Currently, only
// [Cloud Storage](https://cloud.google.com/storage/) URIs are
// supported. URIs must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request
// URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
// multiple videos, a video URI may include wildcards in the `object-id`.
// Supported wildcards: '*' to match 0 or more characters;
// '?' to match 1 character. If unset, the input video should be embedded
// in the request as `input_content`. If set, `input_content` must be unset.
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// The video data bytes.
// If unset, the input video(s) should be specified via the `input_uri`.
// If set, `input_uri` must be unset.
InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"`
// Required. Requested video annotation features.
Features []Feature `protobuf:"varint,2,rep,packed,name=features,proto3,enum=google.cloud.videointelligence.v1.Feature" json:"features,omitempty"`
// Additional video context and/or feature-specific parameters.
VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext,proto3" json:"video_context,omitempty"`
// Optional. Location where the output (in JSON format) should be stored.
// Currently, only [Cloud Storage](https://cloud.google.com/storage/)
// URIs are supported. These must be specified in the following format:
// `gs://bucket-id/object-id` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request
// URIs](https://cloud.google.com/storage/docs/request-endpoints).
OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
// Optional. Cloud region where annotation should take place. Supported cloud
// regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
// region is specified, the region will be determined based on video file
// location.
LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
// contains filtered or unexported fields
}
Video annotation request.
func (*AnnotateVideoRequest) Descriptor
deprecated
func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)
Deprecated: Use AnnotateVideoRequest.ProtoReflect.Descriptor instead.
func (*AnnotateVideoRequest) GetFeatures ¶
func (x *AnnotateVideoRequest) GetFeatures() []Feature
func (*AnnotateVideoRequest) GetInputContent ¶
func (x *AnnotateVideoRequest) GetInputContent() []byte
func (*AnnotateVideoRequest) GetInputUri ¶
func (x *AnnotateVideoRequest) GetInputUri() string
func (*AnnotateVideoRequest) GetLocationId ¶
func (x *AnnotateVideoRequest) GetLocationId() string
func (*AnnotateVideoRequest) GetOutputUri ¶
func (x *AnnotateVideoRequest) GetOutputUri() string
func (*AnnotateVideoRequest) GetVideoContext ¶
func (x *AnnotateVideoRequest) GetVideoContext() *VideoContext
func (*AnnotateVideoRequest) ProtoMessage ¶
func (*AnnotateVideoRequest) ProtoMessage()
func (*AnnotateVideoRequest) ProtoReflect ¶
func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Message
func (*AnnotateVideoRequest) Reset ¶
func (x *AnnotateVideoRequest) Reset()
func (*AnnotateVideoRequest) String ¶
func (x *AnnotateVideoRequest) String() string
type AnnotateVideoResponse ¶
type AnnotateVideoResponse struct {
// Annotation results for all videos specified in `AnnotateVideoRequest`.
AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
// contains filtered or unexported fields
}
Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
func (*AnnotateVideoResponse) Descriptor
deprecated
func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)
Deprecated: Use AnnotateVideoResponse.ProtoReflect.Descriptor instead.
func (*AnnotateVideoResponse) GetAnnotationResults ¶
func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults
func (*AnnotateVideoResponse) ProtoMessage ¶
func (*AnnotateVideoResponse) ProtoMessage()
func (*AnnotateVideoResponse) ProtoReflect ¶
func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Message
func (*AnnotateVideoResponse) Reset ¶
func (x *AnnotateVideoResponse) Reset()
func (*AnnotateVideoResponse) String ¶
func (x *AnnotateVideoResponse) String() string
type DetectedAttribute ¶
type DetectedAttribute struct {
// The name of the attribute, for example, glasses, dark_glasses, mouth_open.
// A full list of supported type names will be provided in the document.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Detected attribute confidence. Range [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Text value of the detection result. For example, the value for "HairColor"
// can be "black", "blonde", etc.
Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
// contains filtered or unexported fields
}
A generic detected attribute represented by name in string format.
func (*DetectedAttribute) Descriptor
deprecated
func (*DetectedAttribute) Descriptor() ([]byte, []int)
Deprecated: Use DetectedAttribute.ProtoReflect.Descriptor instead.
func (*DetectedAttribute) GetConfidence ¶
func (x *DetectedAttribute) GetConfidence() float32
func (*DetectedAttribute) GetName ¶
func (x *DetectedAttribute) GetName() string
func (*DetectedAttribute) GetValue ¶
func (x *DetectedAttribute) GetValue() string
func (*DetectedAttribute) ProtoMessage ¶
func (*DetectedAttribute) ProtoMessage()
func (*DetectedAttribute) ProtoReflect ¶
func (x *DetectedAttribute) ProtoReflect() protoreflect.Message
func (*DetectedAttribute) Reset ¶
func (x *DetectedAttribute) Reset()
func (*DetectedAttribute) String ¶
func (x *DetectedAttribute) String() string
type DetectedLandmark ¶
type DetectedLandmark struct {
// The name of this landmark, for example, left_hand, right_shoulder.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The 2D point of the detected landmark using the normalized image
// coordinate system. The normalized coordinates have the range from 0 to 1.
Point *NormalizedVertex `protobuf:"bytes,2,opt,name=point,proto3" json:"point,omitempty"`
// The confidence score of the detected landmark. Range [0, 1].
Confidence float32 `protobuf:"fixed32,3,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
A generic detected landmark represented by name in string format and a 2D location.
func (*DetectedLandmark) Descriptor
deprecated
func (*DetectedLandmark) Descriptor() ([]byte, []int)
Deprecated: Use DetectedLandmark.ProtoReflect.Descriptor instead.
func (*DetectedLandmark) GetConfidence ¶
func (x *DetectedLandmark) GetConfidence() float32
func (*DetectedLandmark) GetName ¶
func (x *DetectedLandmark) GetName() string
func (*DetectedLandmark) GetPoint ¶
func (x *DetectedLandmark) GetPoint() *NormalizedVertex
func (*DetectedLandmark) ProtoMessage ¶
func (*DetectedLandmark) ProtoMessage()
func (*DetectedLandmark) ProtoReflect ¶
func (x *DetectedLandmark) ProtoReflect() protoreflect.Message
func (*DetectedLandmark) Reset ¶
func (x *DetectedLandmark) Reset()
func (*DetectedLandmark) String ¶
func (x *DetectedLandmark) String() string
type Entity ¶
type Entity struct {
// Opaque entity ID. Some IDs may be available in
// [Google Knowledge Graph Search
// API](https://developers.google.com/knowledge-graph/).
EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
// Textual description, e.g., `Fixed-gear bicycle`.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
// Language code for `description` in BCP-47 format.
LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}
Detected entity from video analysis.
func (*Entity) Descriptor
deprecated
func (*Entity) GetDescription ¶
func (*Entity) GetEntityId ¶
func (*Entity) GetLanguageCode ¶
func (*Entity) ProtoMessage ¶
func (*Entity) ProtoMessage()
func (*Entity) ProtoReflect ¶
func (x *Entity) ProtoReflect() protoreflect.Message
type ExplicitContentAnnotation ¶
type ExplicitContentAnnotation struct {
// All video frames where explicit content was detected.
Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.
func (*ExplicitContentAnnotation) Descriptor
deprecated
func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use ExplicitContentAnnotation.ProtoReflect.Descriptor instead.
func (*ExplicitContentAnnotation) GetFrames ¶
func (x *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame
func (*ExplicitContentAnnotation) GetVersion ¶
func (x *ExplicitContentAnnotation) GetVersion() string
func (*ExplicitContentAnnotation) ProtoMessage ¶
func (*ExplicitContentAnnotation) ProtoMessage()
func (*ExplicitContentAnnotation) ProtoReflect ¶
func (x *ExplicitContentAnnotation) ProtoReflect() protoreflect.Message
func (*ExplicitContentAnnotation) Reset ¶
func (x *ExplicitContentAnnotation) Reset()
func (*ExplicitContentAnnotation) String ¶
func (x *ExplicitContentAnnotation) String() string
type ExplicitContentDetectionConfig ¶
type ExplicitContentDetectionConfig struct {
// Model to use for explicit content detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}
Config for EXPLICIT_CONTENT_DETECTION.
func (*ExplicitContentDetectionConfig) Descriptor
deprecated
func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use ExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.
func (*ExplicitContentDetectionConfig) GetModel ¶
func (x *ExplicitContentDetectionConfig) GetModel() string
func (*ExplicitContentDetectionConfig) ProtoMessage ¶
func (*ExplicitContentDetectionConfig) ProtoMessage()
func (*ExplicitContentDetectionConfig) ProtoReflect ¶
func (x *ExplicitContentDetectionConfig) ProtoReflect() protoreflect.Message
func (*ExplicitContentDetectionConfig) Reset ¶
func (x *ExplicitContentDetectionConfig) Reset()
func (*ExplicitContentDetectionConfig) String ¶
func (x *ExplicitContentDetectionConfig) String() string
type ExplicitContentFrame ¶
type ExplicitContentFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Likelihood of the pornography content.
PornographyLikelihood Likelihood `` /* 175-byte string literal not displayed */
// contains filtered or unexported fields
}
Video frame level annotation results for explicit content.
func (*ExplicitContentFrame) Descriptor
deprecated
func (*ExplicitContentFrame) Descriptor() ([]byte, []int)
Deprecated: Use ExplicitContentFrame.ProtoReflect.Descriptor instead.
func (*ExplicitContentFrame) GetPornographyLikelihood ¶
func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihood
func (*ExplicitContentFrame) GetTimeOffset ¶
func (x *ExplicitContentFrame) GetTimeOffset() *durationpb.Duration
func (*ExplicitContentFrame) ProtoMessage ¶
func (*ExplicitContentFrame) ProtoMessage()
func (*ExplicitContentFrame) ProtoReflect ¶
func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Message
func (*ExplicitContentFrame) Reset ¶
func (x *ExplicitContentFrame) Reset()
func (*ExplicitContentFrame) String ¶
func (x *ExplicitContentFrame) String() string
type FaceAnnotation
deprecated
type FaceAnnotation struct {
// Thumbnail of a representative face view (in JPEG format).
Thumbnail []byte `protobuf:"bytes,1,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
// All video segments where a face was detected.
Segments []*FaceSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a face was detected.
Frames []*FaceFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}
Deprecated. No effect.
Deprecated: Marked as deprecated in google/cloud/videointelligence/v1/video_intelligence.proto.
func (*FaceAnnotation) Descriptor
deprecated
func (*FaceAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use FaceAnnotation.ProtoReflect.Descriptor instead.
func (*FaceAnnotation) GetFrames ¶
func (x *FaceAnnotation) GetFrames() []*FaceFrame
func (*FaceAnnotation) GetSegments ¶
func (x *FaceAnnotation) GetSegments() []*FaceSegment
func (*FaceAnnotation) GetThumbnail ¶
func (x *FaceAnnotation) GetThumbnail() []byte
func (*FaceAnnotation) ProtoMessage ¶
func (*FaceAnnotation) ProtoMessage()
func (*FaceAnnotation) ProtoReflect ¶
func (x *FaceAnnotation) ProtoReflect() protoreflect.Message
func (*FaceAnnotation) Reset ¶
func (x *FaceAnnotation) Reset()
func (*FaceAnnotation) String ¶
func (x *FaceAnnotation) String() string
type FaceDetectionAnnotation ¶
type FaceDetectionAnnotation struct {
// The face tracks with attributes.
Tracks []*Track `protobuf:"bytes,3,rep,name=tracks,proto3" json:"tracks,omitempty"`
// The thumbnail of a person's face.
Thumbnail []byte `protobuf:"bytes,4,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
// Feature version.
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Face detection annotation.
func (*FaceDetectionAnnotation) Descriptor
deprecated
func (*FaceDetectionAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use FaceDetectionAnnotation.ProtoReflect.Descriptor instead.
func (*FaceDetectionAnnotation) GetThumbnail ¶
func (x *FaceDetectionAnnotation) GetThumbnail() []byte
func (*FaceDetectionAnnotation) GetTracks ¶
func (x *FaceDetectionAnnotation) GetTracks() []*Track
func (*FaceDetectionAnnotation) GetVersion ¶
func (x *FaceDetectionAnnotation) GetVersion() string
func (*FaceDetectionAnnotation) ProtoMessage ¶
func (*FaceDetectionAnnotation) ProtoMessage()
func (*FaceDetectionAnnotation) ProtoReflect ¶
func (x *FaceDetectionAnnotation) ProtoReflect() protoreflect.Message
func (*FaceDetectionAnnotation) Reset ¶
func (x *FaceDetectionAnnotation) Reset()
func (*FaceDetectionAnnotation) String ¶
func (x *FaceDetectionAnnotation) String() string
type FaceDetectionConfig ¶
type FaceDetectionConfig struct {
// Model to use for face detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// Whether bounding boxes are included in the face annotation output.
IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
// Whether to enable face attributes detection, such as glasses, dark_glasses,
// mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
IncludeAttributes bool `protobuf:"varint,5,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
// contains filtered or unexported fields
}
Config for FACE_DETECTION.
func (*FaceDetectionConfig) Descriptor
deprecated
func (*FaceDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use FaceDetectionConfig.ProtoReflect.Descriptor instead.
func (*FaceDetectionConfig) GetIncludeAttributes ¶
func (x *FaceDetectionConfig) GetIncludeAttributes() bool
func (*FaceDetectionConfig) GetIncludeBoundingBoxes ¶
func (x *FaceDetectionConfig) GetIncludeBoundingBoxes() bool
func (*FaceDetectionConfig) GetModel ¶
func (x *FaceDetectionConfig) GetModel() string
func (*FaceDetectionConfig) ProtoMessage ¶
func (*FaceDetectionConfig) ProtoMessage()
func (*FaceDetectionConfig) ProtoReflect ¶
func (x *FaceDetectionConfig) ProtoReflect() protoreflect.Message
func (*FaceDetectionConfig) Reset ¶
func (x *FaceDetectionConfig) Reset()
func (*FaceDetectionConfig) String ¶
func (x *FaceDetectionConfig) String() string
type FaceFrame
deprecated
type FaceFrame struct {
// Normalized bounding boxes in a frame.
// There can be more than one box if the same face is detected in multiple
// locations within the current frame.
NormalizedBoundingBoxes []*NormalizedBoundingBox `` /* 132-byte string literal not displayed */
// Time-offset, relative to the beginning of the video,
// corresponding to the video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}
Deprecated. No effect.
Deprecated: Marked as deprecated in google/cloud/videointelligence/v1/video_intelligence.proto.
func (*FaceFrame) Descriptor
deprecated
func (*FaceFrame) GetNormalizedBoundingBoxes ¶
func (x *FaceFrame) GetNormalizedBoundingBoxes() []*NormalizedBoundingBox
func (*FaceFrame) GetTimeOffset ¶
func (x *FaceFrame) GetTimeOffset() *durationpb.Duration
func (*FaceFrame) ProtoMessage ¶
func (*FaceFrame) ProtoMessage()
func (*FaceFrame) ProtoReflect ¶
func (x *FaceFrame) ProtoReflect() protoreflect.Message
type FaceSegment ¶
type FaceSegment struct {
// Video segment where a face was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// contains filtered or unexported fields
}
Video segment level annotation results for face detection.
func (*FaceSegment) Descriptor
deprecated
func (*FaceSegment) Descriptor() ([]byte, []int)
Deprecated: Use FaceSegment.ProtoReflect.Descriptor instead.
func (*FaceSegment) GetSegment ¶
func (x *FaceSegment) GetSegment() *VideoSegment
func (*FaceSegment) ProtoMessage ¶
func (*FaceSegment) ProtoMessage()
func (*FaceSegment) ProtoReflect ¶
func (x *FaceSegment) ProtoReflect() protoreflect.Message
func (*FaceSegment) Reset ¶
func (x *FaceSegment) Reset()
func (*FaceSegment) String ¶
func (x *FaceSegment) String() string
type Feature ¶
type Feature int32
Video annotation feature.
const ( // Unspecified. Feature_FEATURE_UNSPECIFIED Feature = 0 // Label detection. Detect objects, such as dog or flower. Feature_LABEL_DETECTION Feature = 1 // Shot change detection. Feature_SHOT_CHANGE_DETECTION Feature = 2 // Explicit content detection. Feature_EXPLICIT_CONTENT_DETECTION Feature = 3 // Human face detection. Feature_FACE_DETECTION Feature = 4 // Speech transcription. Feature_SPEECH_TRANSCRIPTION Feature = 6 // OCR text detection and tracking. Feature_TEXT_DETECTION Feature = 7 // Object detection and tracking. Feature_OBJECT_TRACKING Feature = 9 // Logo detection, tracking, and recognition. Feature_LOGO_RECOGNITION Feature = 12 // Person detection. Feature_PERSON_DETECTION Feature = 14 )
func (Feature) Descriptor ¶
func (Feature) Descriptor() protoreflect.EnumDescriptor
func (Feature) EnumDescriptor
deprecated
func (Feature) Number ¶
func (x Feature) Number() protoreflect.EnumNumber
func (Feature) Type ¶
func (Feature) Type() protoreflect.EnumType
type LabelAnnotation ¶
type LabelAnnotation struct {
// Detected entity.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// Common categories for the detected entity.
// For example, when the label is `Terrier`, the category is likely `dog`. And
// in some cases there might be more than one category, e.g., `Terrier` could
// also be a `pet`.
CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities,proto3" json:"category_entities,omitempty"`
// All video segments where a label was detected.
Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
// All video frames where a label was detected.
Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Label annotation.
func (*LabelAnnotation) Descriptor
deprecated
func (*LabelAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use LabelAnnotation.ProtoReflect.Descriptor instead.
func (*LabelAnnotation) GetCategoryEntities ¶
func (x *LabelAnnotation) GetCategoryEntities() []*Entity
func (*LabelAnnotation) GetEntity ¶
func (x *LabelAnnotation) GetEntity() *Entity
func (*LabelAnnotation) GetFrames ¶
func (x *LabelAnnotation) GetFrames() []*LabelFrame
func (*LabelAnnotation) GetSegments ¶
func (x *LabelAnnotation) GetSegments() []*LabelSegment
func (*LabelAnnotation) GetVersion ¶
func (x *LabelAnnotation) GetVersion() string
func (*LabelAnnotation) ProtoMessage ¶
func (*LabelAnnotation) ProtoMessage()
func (*LabelAnnotation) ProtoReflect ¶
func (x *LabelAnnotation) ProtoReflect() protoreflect.Message
func (*LabelAnnotation) Reset ¶
func (x *LabelAnnotation) Reset()
func (*LabelAnnotation) String ¶
func (x *LabelAnnotation) String() string
type LabelDetectionConfig ¶
type LabelDetectionConfig struct {
// What labels should be detected with LABEL_DETECTION, in addition to
// video-level labels or segment-level labels.
// If unspecified, defaults to `SHOT_MODE`.
LabelDetectionMode LabelDetectionMode `` /* 176-byte string literal not displayed */
// Whether the video has been shot from a stationary (i.e., non-moving)
// camera. When set to true, might improve detection accuracy for moving
// objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
// Model to use for label detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
// The confidence threshold we perform filtering on the labels from
// frame-level detection. If not set, it is set to 0.4 by default. The valid
// range for this threshold is [0.1, 0.9]. Any value set outside of this
// range will be clipped.
// Note: For best results, follow the default threshold. We will update
// the default threshold every time we release a new model.
FrameConfidenceThreshold float32 `` /* 137-byte string literal not displayed */
// The confidence threshold we perform filtering on the labels from
// video-level and shot-level detections. If not set, it's set to 0.3 by
// default. The valid range for this threshold is [0.1, 0.9]. Any value set
// outside of this range will be clipped.
// Note: For best results, follow the default threshold. We will update
// the default threshold every time we release a new model.
VideoConfidenceThreshold float32 `` /* 137-byte string literal not displayed */
// contains filtered or unexported fields
}
Config for LABEL_DETECTION.
func (*LabelDetectionConfig) Descriptor
deprecated
func (*LabelDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use LabelDetectionConfig.ProtoReflect.Descriptor instead.
func (*LabelDetectionConfig) GetFrameConfidenceThreshold ¶
func (x *LabelDetectionConfig) GetFrameConfidenceThreshold() float32
func (*LabelDetectionConfig) GetLabelDetectionMode ¶
func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode
func (*LabelDetectionConfig) GetModel ¶
func (x *LabelDetectionConfig) GetModel() string
func (*LabelDetectionConfig) GetStationaryCamera ¶
func (x *LabelDetectionConfig) GetStationaryCamera() bool
func (*LabelDetectionConfig) GetVideoConfidenceThreshold ¶
func (x *LabelDetectionConfig) GetVideoConfidenceThreshold() float32
func (*LabelDetectionConfig) ProtoMessage ¶
func (*LabelDetectionConfig) ProtoMessage()
func (*LabelDetectionConfig) ProtoReflect ¶
func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Message
func (*LabelDetectionConfig) Reset ¶
func (x *LabelDetectionConfig) Reset()
func (*LabelDetectionConfig) String ¶
func (x *LabelDetectionConfig) String() string
type LabelDetectionMode ¶
type LabelDetectionMode int32
Label detection mode.
const ( // Unspecified. LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0 // Detect shot-level labels. LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1 // Detect frame-level labels. LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2 // Detect both shot-level and frame-level labels. LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3 )
func (LabelDetectionMode) Descriptor ¶
func (LabelDetectionMode) Descriptor() protoreflect.EnumDescriptor
func (LabelDetectionMode) Enum ¶
func (x LabelDetectionMode) Enum() *LabelDetectionMode
func (LabelDetectionMode) EnumDescriptor
deprecated
func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)
Deprecated: Use LabelDetectionMode.Descriptor instead.
func (LabelDetectionMode) Number ¶
func (x LabelDetectionMode) Number() protoreflect.EnumNumber
func (LabelDetectionMode) String ¶
func (x LabelDetectionMode) String() string
func (LabelDetectionMode) Type ¶
func (LabelDetectionMode) Type() protoreflect.EnumType
type LabelFrame ¶
type LabelFrame struct {
// Time-offset, relative to the beginning of the video, corresponding to the
// video frame for this location.
TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
Video frame level annotation results for label detection.
func (*LabelFrame) Descriptor
deprecated
func (*LabelFrame) Descriptor() ([]byte, []int)
Deprecated: Use LabelFrame.ProtoReflect.Descriptor instead.
func (*LabelFrame) GetConfidence ¶
func (x *LabelFrame) GetConfidence() float32
func (*LabelFrame) GetTimeOffset ¶
func (x *LabelFrame) GetTimeOffset() *durationpb.Duration
func (*LabelFrame) ProtoMessage ¶
func (*LabelFrame) ProtoMessage()
func (*LabelFrame) ProtoReflect ¶
func (x *LabelFrame) ProtoReflect() protoreflect.Message
func (*LabelFrame) Reset ¶
func (x *LabelFrame) Reset()
func (*LabelFrame) String ¶
func (x *LabelFrame) String() string
type LabelSegment ¶
type LabelSegment struct {
// Video segment where a label was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// Confidence that the label is accurate. Range: [0, 1].
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
Video segment level annotation results for label detection.
func (*LabelSegment) Descriptor
deprecated
func (*LabelSegment) Descriptor() ([]byte, []int)
Deprecated: Use LabelSegment.ProtoReflect.Descriptor instead.
func (*LabelSegment) GetConfidence ¶
func (x *LabelSegment) GetConfidence() float32
func (*LabelSegment) GetSegment ¶
func (x *LabelSegment) GetSegment() *VideoSegment
func (*LabelSegment) ProtoMessage ¶
func (*LabelSegment) ProtoMessage()
func (*LabelSegment) ProtoReflect ¶
func (x *LabelSegment) ProtoReflect() protoreflect.Message
func (*LabelSegment) Reset ¶
func (x *LabelSegment) Reset()
func (*LabelSegment) String ¶
func (x *LabelSegment) String() string
type Likelihood ¶
type Likelihood int32
Bucketized representation of likelihood.
const ( // Unspecified likelihood. Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0 // Very unlikely. Likelihood_VERY_UNLIKELY Likelihood = 1 // Unlikely. Likelihood_UNLIKELY Likelihood = 2 // Possible. Likelihood_POSSIBLE Likelihood = 3 // Likely. Likelihood_LIKELY Likelihood = 4 // Very likely. Likelihood_VERY_LIKELY Likelihood = 5 )
func (Likelihood) Descriptor ¶
func (Likelihood) Descriptor() protoreflect.EnumDescriptor
func (Likelihood) Enum ¶
func (x Likelihood) Enum() *Likelihood
func (Likelihood) EnumDescriptor
deprecated
func (Likelihood) EnumDescriptor() ([]byte, []int)
Deprecated: Use Likelihood.Descriptor instead.
func (Likelihood) Number ¶
func (x Likelihood) Number() protoreflect.EnumNumber
func (Likelihood) String ¶
func (x Likelihood) String() string
func (Likelihood) Type ¶
func (Likelihood) Type() protoreflect.EnumType
type LogoRecognitionAnnotation ¶
type LogoRecognitionAnnotation struct {
// Entity category information to specify the logo class that all the logo
// tracks within this LogoRecognitionAnnotation are recognized as.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// All logo tracks where the recognized logo appears. Each track corresponds
// to one logo instance appearing in consecutive frames.
Tracks []*Track `protobuf:"bytes,2,rep,name=tracks,proto3" json:"tracks,omitempty"`
// All video segments where the recognized logo appears. There might be
// multiple instances of the same logo class appearing in one VideoSegment.
Segments []*VideoSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
// contains filtered or unexported fields
}
Annotation corresponding to one detected, tracked and recognized logo class.
func (*LogoRecognitionAnnotation) Descriptor
deprecated
func (*LogoRecognitionAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use LogoRecognitionAnnotation.ProtoReflect.Descriptor instead.
func (*LogoRecognitionAnnotation) GetEntity ¶
func (x *LogoRecognitionAnnotation) GetEntity() *Entity
func (*LogoRecognitionAnnotation) GetSegments ¶
func (x *LogoRecognitionAnnotation) GetSegments() []*VideoSegment
func (*LogoRecognitionAnnotation) GetTracks ¶
func (x *LogoRecognitionAnnotation) GetTracks() []*Track
func (*LogoRecognitionAnnotation) ProtoMessage ¶
func (*LogoRecognitionAnnotation) ProtoMessage()
func (*LogoRecognitionAnnotation) ProtoReflect ¶
func (x *LogoRecognitionAnnotation) ProtoReflect() protoreflect.Message
func (*LogoRecognitionAnnotation) Reset ¶
func (x *LogoRecognitionAnnotation) Reset()
func (*LogoRecognitionAnnotation) String ¶
func (x *LogoRecognitionAnnotation) String() string
type NormalizedBoundingBox ¶
type NormalizedBoundingBox struct {
// Left X coordinate.
Left float32 `protobuf:"fixed32,1,opt,name=left,proto3" json:"left,omitempty"`
// Top Y coordinate.
Top float32 `protobuf:"fixed32,2,opt,name=top,proto3" json:"top,omitempty"`
// Right X coordinate.
Right float32 `protobuf:"fixed32,3,opt,name=right,proto3" json:"right,omitempty"`
// Bottom Y coordinate.
Bottom float32 `protobuf:"fixed32,4,opt,name=bottom,proto3" json:"bottom,omitempty"`
// contains filtered or unexported fields
}
Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].
func (*NormalizedBoundingBox) Descriptor
deprecated
func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)
Deprecated: Use NormalizedBoundingBox.ProtoReflect.Descriptor instead.
func (*NormalizedBoundingBox) GetBottom ¶
func (x *NormalizedBoundingBox) GetBottom() float32
func (*NormalizedBoundingBox) GetLeft ¶
func (x *NormalizedBoundingBox) GetLeft() float32
func (*NormalizedBoundingBox) GetRight ¶
func (x *NormalizedBoundingBox) GetRight() float32
func (*NormalizedBoundingBox) GetTop ¶
func (x *NormalizedBoundingBox) GetTop() float32
func (*NormalizedBoundingBox) ProtoMessage ¶
func (*NormalizedBoundingBox) ProtoMessage()
func (*NormalizedBoundingBox) ProtoReflect ¶
func (x *NormalizedBoundingBox) ProtoReflect() protoreflect.Message
func (*NormalizedBoundingBox) Reset ¶
func (x *NormalizedBoundingBox) Reset()
func (*NormalizedBoundingBox) String ¶
func (x *NormalizedBoundingBox) String() string
type NormalizedBoundingPoly ¶
type NormalizedBoundingPoly struct {
// Normalized vertices of the bounding polygon.
Vertices []*NormalizedVertex `protobuf:"bytes,1,rep,name=vertices,proto3" json:"vertices,omitempty"`
// contains filtered or unexported fields
}
Normalized bounding polygon for text (that might not be aligned with axis). Contains list of the corner points in clockwise order starting from top-left corner. For example, for a rectangular bounding box: When the text is horizontal it might look like:
0----1 | | 3----2
When it's clockwise rotated 180 degrees around the top-left corner it becomes:
2----3 | | 1----0
and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trigonometric calculations for location of the box.
func (*NormalizedBoundingPoly) Descriptor
deprecated
func (*NormalizedBoundingPoly) Descriptor() ([]byte, []int)
Deprecated: Use NormalizedBoundingPoly.ProtoReflect.Descriptor instead.
func (*NormalizedBoundingPoly) GetVertices ¶
func (x *NormalizedBoundingPoly) GetVertices() []*NormalizedVertex
func (*NormalizedBoundingPoly) ProtoMessage ¶
func (*NormalizedBoundingPoly) ProtoMessage()
func (*NormalizedBoundingPoly) ProtoReflect ¶
func (x *NormalizedBoundingPoly) ProtoReflect() protoreflect.Message
func (*NormalizedBoundingPoly) Reset ¶
func (x *NormalizedBoundingPoly) Reset()
func (*NormalizedBoundingPoly) String ¶
func (x *NormalizedBoundingPoly) String() string
type NormalizedVertex ¶
type NormalizedVertex struct {
// X coordinate.
X float32 `protobuf:"fixed32,1,opt,name=x,proto3" json:"x,omitempty"`
// Y coordinate.
Y float32 `protobuf:"fixed32,2,opt,name=y,proto3" json:"y,omitempty"`
// contains filtered or unexported fields
}
A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1.
func (*NormalizedVertex) Descriptor
deprecated
func (*NormalizedVertex) Descriptor() ([]byte, []int)
Deprecated: Use NormalizedVertex.ProtoReflect.Descriptor instead.
func (*NormalizedVertex) GetX ¶
func (x *NormalizedVertex) GetX() float32
func (*NormalizedVertex) GetY ¶
func (x *NormalizedVertex) GetY() float32
func (*NormalizedVertex) ProtoMessage ¶
func (*NormalizedVertex) ProtoMessage()
func (*NormalizedVertex) ProtoReflect ¶
func (x *NormalizedVertex) ProtoReflect() protoreflect.Message
func (*NormalizedVertex) Reset ¶
func (x *NormalizedVertex) Reset()
func (*NormalizedVertex) String ¶
func (x *NormalizedVertex) String() string
type ObjectTrackingAnnotation ¶
type ObjectTrackingAnnotation struct {
// Different representation of tracking info in non-streaming batch
// and streaming modes.
//
// Types that are valid to be assigned to TrackInfo:
//
// *ObjectTrackingAnnotation_Segment
// *ObjectTrackingAnnotation_TrackId
TrackInfo isObjectTrackingAnnotation_TrackInfo `protobuf_oneof:"track_info"`
// Entity to specify the object category that this track is labeled as.
Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
// Object category's labeling confidence of this track.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Information corresponding to all frames where this object track appears.
// Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
// messages in frames.
// Streaming mode: it can only be one ObjectTrackingFrame message in frames.
Frames []*ObjectTrackingFrame `protobuf:"bytes,2,rep,name=frames,proto3" json:"frames,omitempty"`
// Feature version.
Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Annotations corresponding to one tracked object.
func (*ObjectTrackingAnnotation) Descriptor
deprecated
func (*ObjectTrackingAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use ObjectTrackingAnnotation.ProtoReflect.Descriptor instead.
func (*ObjectTrackingAnnotation) GetConfidence ¶
func (x *ObjectTrackingAnnotation) GetConfidence() float32
func (*ObjectTrackingAnnotation) GetEntity ¶
func (x *ObjectTrackingAnnotation) GetEntity() *Entity
func (*ObjectTrackingAnnotation) GetFrames ¶
func (x *ObjectTrackingAnnotation) GetFrames() []*ObjectTrackingFrame
func (*ObjectTrackingAnnotation) GetSegment ¶
func (x *ObjectTrackingAnnotation) GetSegment() *VideoSegment
func (*ObjectTrackingAnnotation) GetTrackId ¶
func (x *ObjectTrackingAnnotation) GetTrackId() int64
func (*ObjectTrackingAnnotation) GetTrackInfo ¶
func (x *ObjectTrackingAnnotation) GetTrackInfo() isObjectTrackingAnnotation_TrackInfo
func (*ObjectTrackingAnnotation) GetVersion ¶
func (x *ObjectTrackingAnnotation) GetVersion() string
func (*ObjectTrackingAnnotation) ProtoMessage ¶
func (*ObjectTrackingAnnotation) ProtoMessage()
func (*ObjectTrackingAnnotation) ProtoReflect ¶
func (x *ObjectTrackingAnnotation) ProtoReflect() protoreflect.Message
func (*ObjectTrackingAnnotation) Reset ¶
func (x *ObjectTrackingAnnotation) Reset()
func (*ObjectTrackingAnnotation) String ¶
func (x *ObjectTrackingAnnotation) String() string
type ObjectTrackingAnnotation_Segment ¶
type ObjectTrackingAnnotation_Segment struct {
// Non-streaming batch mode ONLY.
// Each object track corresponds to one video segment where it appears.
Segment *VideoSegment `protobuf:"bytes,3,opt,name=segment,proto3,oneof"`
}
type ObjectTrackingAnnotation_TrackId ¶
type ObjectTrackingAnnotation_TrackId struct {
// Streaming mode ONLY.
// In streaming mode, we do not know the end time of a tracked object
// before it is completed. Hence, there is no VideoSegment info returned.
// Instead, we provide a unique identifiable integer track_id so that
// the customers can correlate the results of the ongoing
// ObjectTrackAnnotation of the same track_id over time.
TrackId int64 `protobuf:"varint,5,opt,name=track_id,json=trackId,proto3,oneof"`
}
type ObjectTrackingConfig ¶
type ObjectTrackingConfig struct {
// Model to use for object tracking.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}
Config for OBJECT_TRACKING.
func (*ObjectTrackingConfig) Descriptor
deprecated
func (*ObjectTrackingConfig) Descriptor() ([]byte, []int)
Deprecated: Use ObjectTrackingConfig.ProtoReflect.Descriptor instead.
func (*ObjectTrackingConfig) GetModel ¶
func (x *ObjectTrackingConfig) GetModel() string
func (*ObjectTrackingConfig) ProtoMessage ¶
func (*ObjectTrackingConfig) ProtoMessage()
func (*ObjectTrackingConfig) ProtoReflect ¶
func (x *ObjectTrackingConfig) ProtoReflect() protoreflect.Message
func (*ObjectTrackingConfig) Reset ¶
func (x *ObjectTrackingConfig) Reset()
func (*ObjectTrackingConfig) String ¶
func (x *ObjectTrackingConfig) String() string
type ObjectTrackingFrame ¶
type ObjectTrackingFrame struct {
// The normalized bounding box location of this object track for the frame.
NormalizedBoundingBox *NormalizedBoundingBox `` /* 126-byte string literal not displayed */
// The timestamp of the frame in microseconds.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}
Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence.
func (*ObjectTrackingFrame) Descriptor
deprecated
func (*ObjectTrackingFrame) Descriptor() ([]byte, []int)
Deprecated: Use ObjectTrackingFrame.ProtoReflect.Descriptor instead.
func (*ObjectTrackingFrame) GetNormalizedBoundingBox ¶
func (x *ObjectTrackingFrame) GetNormalizedBoundingBox() *NormalizedBoundingBox
func (*ObjectTrackingFrame) GetTimeOffset ¶
func (x *ObjectTrackingFrame) GetTimeOffset() *durationpb.Duration
func (*ObjectTrackingFrame) ProtoMessage ¶
func (*ObjectTrackingFrame) ProtoMessage()
func (*ObjectTrackingFrame) ProtoReflect ¶
func (x *ObjectTrackingFrame) ProtoReflect() protoreflect.Message
func (*ObjectTrackingFrame) Reset ¶
func (x *ObjectTrackingFrame) Reset()
func (*ObjectTrackingFrame) String ¶
func (x *ObjectTrackingFrame) String() string
type PersonDetectionAnnotation ¶
type PersonDetectionAnnotation struct {
// The detected tracks of a person.
Tracks []*Track `protobuf:"bytes,1,rep,name=tracks,proto3" json:"tracks,omitempty"`
// Feature version.
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Person detection annotation per video.
func (*PersonDetectionAnnotation) Descriptor
deprecated
func (*PersonDetectionAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use PersonDetectionAnnotation.ProtoReflect.Descriptor instead.
func (*PersonDetectionAnnotation) GetTracks ¶
func (x *PersonDetectionAnnotation) GetTracks() []*Track
func (*PersonDetectionAnnotation) GetVersion ¶
func (x *PersonDetectionAnnotation) GetVersion() string
func (*PersonDetectionAnnotation) ProtoMessage ¶
func (*PersonDetectionAnnotation) ProtoMessage()
func (*PersonDetectionAnnotation) ProtoReflect ¶
func (x *PersonDetectionAnnotation) ProtoReflect() protoreflect.Message
func (*PersonDetectionAnnotation) Reset ¶
func (x *PersonDetectionAnnotation) Reset()
func (*PersonDetectionAnnotation) String ¶
func (x *PersonDetectionAnnotation) String() string
type PersonDetectionConfig ¶
type PersonDetectionConfig struct {
// Whether bounding boxes are included in the person detection annotation
// output.
IncludeBoundingBoxes bool `protobuf:"varint,1,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
// Whether to enable pose landmarks detection. Ignored if
// 'include_bounding_boxes' is set to false.
IncludePoseLandmarks bool `protobuf:"varint,2,opt,name=include_pose_landmarks,json=includePoseLandmarks,proto3" json:"include_pose_landmarks,omitempty"`
// Whether to enable person attributes detection, such as cloth color (black,
// blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
// etc.
// Ignored if 'include_bounding_boxes' is set to false.
IncludeAttributes bool `protobuf:"varint,3,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
// contains filtered or unexported fields
}
Config for PERSON_DETECTION.
func (*PersonDetectionConfig) Descriptor
deprecated
func (*PersonDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use PersonDetectionConfig.ProtoReflect.Descriptor instead.
func (*PersonDetectionConfig) GetIncludeAttributes ¶
func (x *PersonDetectionConfig) GetIncludeAttributes() bool
func (*PersonDetectionConfig) GetIncludeBoundingBoxes ¶
func (x *PersonDetectionConfig) GetIncludeBoundingBoxes() bool
func (*PersonDetectionConfig) GetIncludePoseLandmarks ¶
func (x *PersonDetectionConfig) GetIncludePoseLandmarks() bool
func (*PersonDetectionConfig) ProtoMessage ¶
func (*PersonDetectionConfig) ProtoMessage()
func (*PersonDetectionConfig) ProtoReflect ¶
func (x *PersonDetectionConfig) ProtoReflect() protoreflect.Message
func (*PersonDetectionConfig) Reset ¶
func (x *PersonDetectionConfig) Reset()
func (*PersonDetectionConfig) String ¶
func (x *PersonDetectionConfig) String() string
type ShotChangeDetectionConfig ¶
type ShotChangeDetectionConfig struct {
// Model to use for shot change detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}
Config for SHOT_CHANGE_DETECTION.
func (*ShotChangeDetectionConfig) Descriptor
deprecated
func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use ShotChangeDetectionConfig.ProtoReflect.Descriptor instead.
func (*ShotChangeDetectionConfig) GetModel ¶
func (x *ShotChangeDetectionConfig) GetModel() string
func (*ShotChangeDetectionConfig) ProtoMessage ¶
func (*ShotChangeDetectionConfig) ProtoMessage()
func (*ShotChangeDetectionConfig) ProtoReflect ¶
func (x *ShotChangeDetectionConfig) ProtoReflect() protoreflect.Message
func (*ShotChangeDetectionConfig) Reset ¶
func (x *ShotChangeDetectionConfig) Reset()
func (*ShotChangeDetectionConfig) String ¶
func (x *ShotChangeDetectionConfig) String() string
type SpeechContext ¶
type SpeechContext struct {
// Optional. A list of strings containing words and phrases "hints" so that
// the speech recognition is more likely to recognize them. This can be used
// to improve the accuracy for specific words and phrases, for example, if
// specific commands are typically spoken by the user. This can also be used
// to add additional words to the vocabulary of the recognizer. See
// [usage limits](https://cloud.google.com/speech/limits#content).
Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
// contains filtered or unexported fields
}
Provides "hints" to the speech recognizer to favor specific words and phrases in the results.
func (*SpeechContext) Descriptor
deprecated
func (*SpeechContext) Descriptor() ([]byte, []int)
Deprecated: Use SpeechContext.ProtoReflect.Descriptor instead.
func (*SpeechContext) GetPhrases ¶
func (x *SpeechContext) GetPhrases() []string
func (*SpeechContext) ProtoMessage ¶
func (*SpeechContext) ProtoMessage()
func (*SpeechContext) ProtoReflect ¶
func (x *SpeechContext) ProtoReflect() protoreflect.Message
func (*SpeechContext) Reset ¶
func (x *SpeechContext) Reset()
func (*SpeechContext) String ¶
func (x *SpeechContext) String() string
type SpeechRecognitionAlternative ¶
type SpeechRecognitionAlternative struct {
// Transcript text representing the words that the user spoke.
Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
// Output only. The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is set only for the top alternative.
// This field is not guaranteed to be accurate and users should not rely on it
// to be always provided.
// The default of 0.0 is a sentinel value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Output only. A list of word-specific information for each recognized word.
// Note: When `enable_speaker_diarization` is set to true, you will see all
// the words from the beginning of the audio.
Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
// contains filtered or unexported fields
}
Alternative hypotheses (a.k.a. n-best list).
func (*SpeechRecognitionAlternative) Descriptor
deprecated
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)
Deprecated: Use SpeechRecognitionAlternative.ProtoReflect.Descriptor instead.
func (*SpeechRecognitionAlternative) GetConfidence ¶
func (x *SpeechRecognitionAlternative) GetConfidence() float32
func (*SpeechRecognitionAlternative) GetTranscript ¶
func (x *SpeechRecognitionAlternative) GetTranscript() string
func (*SpeechRecognitionAlternative) GetWords ¶
func (x *SpeechRecognitionAlternative) GetWords() []*WordInfo
func (*SpeechRecognitionAlternative) ProtoMessage ¶
func (*SpeechRecognitionAlternative) ProtoMessage()
func (*SpeechRecognitionAlternative) ProtoReflect ¶
func (x *SpeechRecognitionAlternative) ProtoReflect() protoreflect.Message
func (*SpeechRecognitionAlternative) Reset ¶
func (x *SpeechRecognitionAlternative) Reset()
func (*SpeechRecognitionAlternative) String ¶
func (x *SpeechRecognitionAlternative) String() string
type SpeechTranscription ¶
type SpeechTranscription struct {
// May contain one or more recognition hypotheses (up to the maximum specified
// in `max_alternatives`). These alternatives are ordered in terms of
// accuracy, with the top (first) alternative being the most probable, as
// ranked by the recognizer.
Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
// language tag of the language in this result. This language code was
// detected to have the most likelihood of being spoken in the audio.
LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// contains filtered or unexported fields
}
A speech recognition result corresponding to a portion of the audio.
func (*SpeechTranscription) Descriptor
deprecated
func (*SpeechTranscription) Descriptor() ([]byte, []int)
Deprecated: Use SpeechTranscription.ProtoReflect.Descriptor instead.
func (*SpeechTranscription) GetAlternatives ¶
func (x *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative
func (*SpeechTranscription) GetLanguageCode ¶
func (x *SpeechTranscription) GetLanguageCode() string
func (*SpeechTranscription) ProtoMessage ¶
func (*SpeechTranscription) ProtoMessage()
func (*SpeechTranscription) ProtoReflect ¶
func (x *SpeechTranscription) ProtoReflect() protoreflect.Message
func (*SpeechTranscription) Reset ¶
func (x *SpeechTranscription) Reset()
func (*SpeechTranscription) String ¶
func (x *SpeechTranscription) String() string
type SpeechTranscriptionConfig ¶
type SpeechTranscriptionConfig struct {
// Required. The language of the supplied audio as a
// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
// Example: "en-US".
// See [Language Support](https://cloud.google.com/speech/docs/languages)
// for a list of the currently supported language codes.
LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
// Optional. Maximum number of recognition hypotheses to be returned.
// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
// within each `SpeechTranscription`. The server may return fewer than
// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
// return a maximum of one. If omitted, will return a maximum of one.
MaxAlternatives int32 `protobuf:"varint,2,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
// Optional. If set to `true`, the server will attempt to filter out
// profanities, replacing all but the initial character in each filtered word
// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
// won't be filtered out.
FilterProfanity bool `protobuf:"varint,3,opt,name=filter_profanity,json=filterProfanity,proto3" json:"filter_profanity,omitempty"`
// Optional. A means to provide context to assist the speech recognition.
SpeechContexts []*SpeechContext `protobuf:"bytes,4,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
// Optional. If 'true', adds punctuation to recognition result hypotheses.
// This feature is only available in select languages. Setting this for
// requests in other languages has no effect at all. The default 'false' value
// does not add punctuation to result hypotheses. NOTE: "This is currently
// offered as an experimental service, complimentary to all users. In the
// future this may be exclusively available as a premium feature."
EnableAutomaticPunctuation bool `` /* 142-byte string literal not displayed */
// Optional. For file formats, such as MXF or MKV, supporting multiple audio
// tracks, specify up to two tracks. Default: track 0.
AudioTracks []int32 `protobuf:"varint,6,rep,packed,name=audio_tracks,json=audioTracks,proto3" json:"audio_tracks,omitempty"`
// Optional. If 'true', enables speaker detection for each recognized word in
// the top alternative of the recognition result using a speaker_tag provided
// in the WordInfo.
// Note: When this is true, we send all the words from the beginning of the
// audio for the top alternative in every consecutive response.
// This is done in order to improve our speaker tags as our models learn to
// identify the speakers in the conversation over time.
EnableSpeakerDiarization bool `` /* 136-byte string literal not displayed */
// Optional. If set, specifies the estimated number of speakers in the
// conversation. If not set, defaults to '2'. Ignored unless
// enable_speaker_diarization is set to true.
DiarizationSpeakerCount int32 `` /* 133-byte string literal not displayed */
// Optional. If `true`, the top result includes a list of words and the
// confidence for those words. If `false`, no word-level confidence
// information is returned. The default is `false`.
EnableWordConfidence bool `protobuf:"varint,9,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
// contains filtered or unexported fields
}
Config for SPEECH_TRANSCRIPTION.
func (*SpeechTranscriptionConfig) Descriptor
deprecated
func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int)
Deprecated: Use SpeechTranscriptionConfig.ProtoReflect.Descriptor instead.
func (*SpeechTranscriptionConfig) GetAudioTracks ¶
func (x *SpeechTranscriptionConfig) GetAudioTracks() []int32
func (*SpeechTranscriptionConfig) GetDiarizationSpeakerCount ¶
func (x *SpeechTranscriptionConfig) GetDiarizationSpeakerCount() int32
func (*SpeechTranscriptionConfig) GetEnableAutomaticPunctuation ¶
func (x *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() bool
func (*SpeechTranscriptionConfig) GetEnableSpeakerDiarization ¶
func (x *SpeechTranscriptionConfig) GetEnableSpeakerDiarization() bool
func (*SpeechTranscriptionConfig) GetEnableWordConfidence ¶
func (x *SpeechTranscriptionConfig) GetEnableWordConfidence() bool
func (*SpeechTranscriptionConfig) GetFilterProfanity ¶
func (x *SpeechTranscriptionConfig) GetFilterProfanity() bool
func (*SpeechTranscriptionConfig) GetLanguageCode ¶
func (x *SpeechTranscriptionConfig) GetLanguageCode() string
func (*SpeechTranscriptionConfig) GetMaxAlternatives ¶
func (x *SpeechTranscriptionConfig) GetMaxAlternatives() int32
func (*SpeechTranscriptionConfig) GetSpeechContexts ¶
func (x *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext
func (*SpeechTranscriptionConfig) ProtoMessage ¶
func (*SpeechTranscriptionConfig) ProtoMessage()
func (*SpeechTranscriptionConfig) ProtoReflect ¶
func (x *SpeechTranscriptionConfig) ProtoReflect() protoreflect.Message
func (*SpeechTranscriptionConfig) Reset ¶
func (x *SpeechTranscriptionConfig) Reset()
func (*SpeechTranscriptionConfig) String ¶
func (x *SpeechTranscriptionConfig) String() string
type TextAnnotation ¶
type TextAnnotation struct {
// The detected text.
Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
// All video segments where OCR detected text appears.
Segments []*TextSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
// Feature version.
Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection.
func (*TextAnnotation) Descriptor
deprecated
func (*TextAnnotation) Descriptor() ([]byte, []int)
Deprecated: Use TextAnnotation.ProtoReflect.Descriptor instead.
func (*TextAnnotation) GetSegments ¶
func (x *TextAnnotation) GetSegments() []*TextSegment
func (*TextAnnotation) GetText ¶
func (x *TextAnnotation) GetText() string
func (*TextAnnotation) GetVersion ¶
func (x *TextAnnotation) GetVersion() string
func (*TextAnnotation) ProtoMessage ¶
func (*TextAnnotation) ProtoMessage()
func (*TextAnnotation) ProtoReflect ¶
func (x *TextAnnotation) ProtoReflect() protoreflect.Message
func (*TextAnnotation) Reset ¶
func (x *TextAnnotation) Reset()
func (*TextAnnotation) String ¶
func (x *TextAnnotation) String() string
type TextDetectionConfig ¶
type TextDetectionConfig struct {
// Language hint can be specified if the language to be detected is known a
// priori. It can increase the accuracy of the detection. Language hint must
// be language code in BCP-47 format.
//
// Automatic language detection is performed if no hint is provided.
LanguageHints []string `protobuf:"bytes,1,rep,name=language_hints,json=languageHints,proto3" json:"language_hints,omitempty"`
// Model to use for text detection.
// Supported values: "builtin/stable" (the default if unset) and
// "builtin/latest".
Model string `protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
// contains filtered or unexported fields
}
Config for TEXT_DETECTION.
func (*TextDetectionConfig) Descriptor
deprecated
func (*TextDetectionConfig) Descriptor() ([]byte, []int)
Deprecated: Use TextDetectionConfig.ProtoReflect.Descriptor instead.
func (*TextDetectionConfig) GetLanguageHints ¶
func (x *TextDetectionConfig) GetLanguageHints() []string
func (*TextDetectionConfig) GetModel ¶
func (x *TextDetectionConfig) GetModel() string
func (*TextDetectionConfig) ProtoMessage ¶
func (*TextDetectionConfig) ProtoMessage()
func (*TextDetectionConfig) ProtoReflect ¶
func (x *TextDetectionConfig) ProtoReflect() protoreflect.Message
func (*TextDetectionConfig) Reset ¶
func (x *TextDetectionConfig) Reset()
func (*TextDetectionConfig) String ¶
func (x *TextDetectionConfig) String() string
type TextFrame ¶
type TextFrame struct {
// Bounding polygon of the detected text for this frame.
RotatedBoundingBox *NormalizedBoundingPoly `protobuf:"bytes,1,opt,name=rotated_bounding_box,json=rotatedBoundingBox,proto3" json:"rotated_bounding_box,omitempty"`
// Timestamp of this frame.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// contains filtered or unexported fields
}
Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets.
func (*TextFrame) Descriptor
deprecated
func (*TextFrame) GetRotatedBoundingBox ¶
func (x *TextFrame) GetRotatedBoundingBox() *NormalizedBoundingPoly
func (*TextFrame) GetTimeOffset ¶
func (x *TextFrame) GetTimeOffset() *durationpb.Duration
func (*TextFrame) ProtoMessage ¶
func (*TextFrame) ProtoMessage()
func (*TextFrame) ProtoReflect ¶
func (x *TextFrame) ProtoReflect() protoreflect.Message
type TextSegment ¶
type TextSegment struct {
// Video segment where a text snippet was detected.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// Confidence for the track of detected text. It is calculated as the highest
// over all frames where OCR detected text appears.
Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Information related to the frames where OCR detected text appears.
Frames []*TextFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
// contains filtered or unexported fields
}
Video segment level annotation results for text detection.
func (*TextSegment) Descriptor
deprecated
func (*TextSegment) Descriptor() ([]byte, []int)
Deprecated: Use TextSegment.ProtoReflect.Descriptor instead.
func (*TextSegment) GetConfidence ¶
func (x *TextSegment) GetConfidence() float32
func (*TextSegment) GetFrames ¶
func (x *TextSegment) GetFrames() []*TextFrame
func (*TextSegment) GetSegment ¶
func (x *TextSegment) GetSegment() *VideoSegment
func (*TextSegment) ProtoMessage ¶
func (*TextSegment) ProtoMessage()
func (*TextSegment) ProtoReflect ¶
func (x *TextSegment) ProtoReflect() protoreflect.Message
func (*TextSegment) Reset ¶
func (x *TextSegment) Reset()
func (*TextSegment) String ¶
func (x *TextSegment) String() string
type TimestampedObject ¶
type TimestampedObject struct {
// Normalized Bounding box in a frame, where the object is located.
NormalizedBoundingBox *NormalizedBoundingBox `` /* 126-byte string literal not displayed */
// Time-offset, relative to the beginning of the video,
// corresponding to the video frame for this object.
TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
// Optional. The attributes of the object in the bounding box.
Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
// Optional. The detected landmarks.
Landmarks []*DetectedLandmark `protobuf:"bytes,4,rep,name=landmarks,proto3" json:"landmarks,omitempty"`
// contains filtered or unexported fields
}
For tracking related features. An object at time_offset with attributes, and located with normalized_bounding_box.
func (*TimestampedObject) Descriptor
deprecated
func (*TimestampedObject) Descriptor() ([]byte, []int)
Deprecated: Use TimestampedObject.ProtoReflect.Descriptor instead.
func (*TimestampedObject) GetAttributes ¶
func (x *TimestampedObject) GetAttributes() []*DetectedAttribute
func (*TimestampedObject) GetLandmarks ¶
func (x *TimestampedObject) GetLandmarks() []*DetectedLandmark
func (*TimestampedObject) GetNormalizedBoundingBox ¶
func (x *TimestampedObject) GetNormalizedBoundingBox() *NormalizedBoundingBox
func (*TimestampedObject) GetTimeOffset ¶
func (x *TimestampedObject) GetTimeOffset() *durationpb.Duration
func (*TimestampedObject) ProtoMessage ¶
func (*TimestampedObject) ProtoMessage()
func (*TimestampedObject) ProtoReflect ¶
func (x *TimestampedObject) ProtoReflect() protoreflect.Message
func (*TimestampedObject) Reset ¶
func (x *TimestampedObject) Reset()
func (*TimestampedObject) String ¶
func (x *TimestampedObject) String() string
type Track ¶
type Track struct {
// Video segment of a track.
Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
// The object with timestamp and attributes per frame in the track.
TimestampedObjects []*TimestampedObject `protobuf:"bytes,2,rep,name=timestamped_objects,json=timestampedObjects,proto3" json:"timestamped_objects,omitempty"`
// Optional. Attributes in the track level.
Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
// Optional. The confidence score of the tracked object.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// contains filtered or unexported fields
}
A track of an object instance.
func (*Track) Descriptor
deprecated
func (*Track) GetAttributes ¶
func (x *Track) GetAttributes() []*DetectedAttribute
func (*Track) GetConfidence ¶
func (*Track) GetSegment ¶
func (x *Track) GetSegment() *VideoSegment
func (*Track) GetTimestampedObjects ¶
func (x *Track) GetTimestampedObjects() []*TimestampedObject
func (*Track) ProtoMessage ¶
func (*Track) ProtoMessage()
func (*Track) ProtoReflect ¶
func (x *Track) ProtoReflect() protoreflect.Message
type UnimplementedVideoIntelligenceServiceServer ¶
type UnimplementedVideoIntelligenceServiceServer struct {
}
UnimplementedVideoIntelligenceServiceServer should be embedded to have forward-compatible implementations.
func (UnimplementedVideoIntelligenceServiceServer) AnnotateVideo ¶
func (UnimplementedVideoIntelligenceServiceServer) AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunningpb.Operation, error)
type UnsafeVideoIntelligenceServiceServer ¶ added in v1.12.7
type UnsafeVideoIntelligenceServiceServer interface {
// contains filtered or unexported methods
}
UnsafeVideoIntelligenceServiceServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to VideoIntelligenceServiceServer will result in compilation errors.
type VideoAnnotationProgress ¶
type VideoAnnotationProgress struct {
// Video file location in
// [Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Approximate percentage processed thus far. Guaranteed to be
// 100 when fully processed.
ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
// Time when the request was received.
StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Time of the most recent update.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// Specifies which feature is being tracked if the request contains more than
// one feature.
Feature Feature `protobuf:"varint,5,opt,name=feature,proto3,enum=google.cloud.videointelligence.v1.Feature" json:"feature,omitempty"`
// Specifies which segment is being tracked if the request contains more than
// one segment.
Segment *VideoSegment `protobuf:"bytes,6,opt,name=segment,proto3" json:"segment,omitempty"`
// contains filtered or unexported fields
}
Annotation progress for a single video.
func (*VideoAnnotationProgress) Descriptor
deprecated
func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)
Deprecated: Use VideoAnnotationProgress.ProtoReflect.Descriptor instead.
func (*VideoAnnotationProgress) GetFeature ¶
func (x *VideoAnnotationProgress) GetFeature() Feature
func (*VideoAnnotationProgress) GetInputUri ¶
func (x *VideoAnnotationProgress) GetInputUri() string
func (*VideoAnnotationProgress) GetProgressPercent ¶
func (x *VideoAnnotationProgress) GetProgressPercent() int32
func (*VideoAnnotationProgress) GetSegment ¶
func (x *VideoAnnotationProgress) GetSegment() *VideoSegment
func (*VideoAnnotationProgress) GetStartTime ¶
func (x *VideoAnnotationProgress) GetStartTime() *timestamppb.Timestamp
func (*VideoAnnotationProgress) GetUpdateTime ¶
func (x *VideoAnnotationProgress) GetUpdateTime() *timestamppb.Timestamp
func (*VideoAnnotationProgress) ProtoMessage ¶
func (*VideoAnnotationProgress) ProtoMessage()
func (*VideoAnnotationProgress) ProtoReflect ¶
func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Message
func (*VideoAnnotationProgress) Reset ¶
func (x *VideoAnnotationProgress) Reset()
func (*VideoAnnotationProgress) String ¶
func (x *VideoAnnotationProgress) String() string
type VideoAnnotationResults ¶
type VideoAnnotationResults struct {
// Video file location in
// [Cloud Storage](https://cloud.google.com/storage/).
InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
// Video segment on which the annotation is run.
Segment *VideoSegment `protobuf:"bytes,10,opt,name=segment,proto3" json:"segment,omitempty"`
// Topical label annotations on video level or user-specified segment level.
// There is exactly one element for each unique label.
SegmentLabelAnnotations []*LabelAnnotation `` /* 132-byte string literal not displayed */
// Presence label annotations on video level or user-specified segment level.
// There is exactly one element for each unique label. Compared to the
// existing topical `segment_label_annotations`, this field presents more
// fine-grained, segment-level labels detected in video content and is made
// available only when the client sets `LabelDetectionConfig.model` to
// "builtin/latest" in the request.
SegmentPresenceLabelAnnotations []*LabelAnnotation `` /* 159-byte string literal not displayed */
// Topical label annotations on shot level.
// There is exactly one element for each unique label.
ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations,proto3" json:"shot_label_annotations,omitempty"`
// Presence label annotations on shot level. There is exactly one element for
// each unique label. Compared to the existing topical
// `shot_label_annotations`, this field presents more fine-grained, shot-level
// labels detected in video content and is made available only when the client
// sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
ShotPresenceLabelAnnotations []*LabelAnnotation `` /* 150-byte string literal not displayed */
// Label annotations on frame level.
// There is exactly one element for each unique label.
FrameLabelAnnotations []*LabelAnnotation `` /* 126-byte string literal not displayed */
// Deprecated. Please use `face_detection_annotations` instead.
//
// Deprecated: Marked as deprecated in google/cloud/videointelligence/v1/video_intelligence.proto.
FaceAnnotations []*FaceAnnotation `protobuf:"bytes,5,rep,name=face_annotations,json=faceAnnotations,proto3" json:"face_annotations,omitempty"`
// Face detection annotations.
FaceDetectionAnnotations []*FaceDetectionAnnotation `` /* 136-byte string literal not displayed */
// Shot annotations. Each shot is represented as a video segment.
ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
// Explicit content annotation.
ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
// Speech transcription.
SpeechTranscriptions []*SpeechTranscription `protobuf:"bytes,11,rep,name=speech_transcriptions,json=speechTranscriptions,proto3" json:"speech_transcriptions,omitempty"`
// OCR text detection and tracking.
// Annotations for list of detected text snippets. Each will have list of
// frame information associated with it.
TextAnnotations []*TextAnnotation `protobuf:"bytes,12,rep,name=text_annotations,json=textAnnotations,proto3" json:"text_annotations,omitempty"`
// Annotations for list of objects detected and tracked in video.
ObjectAnnotations []*ObjectTrackingAnnotation `protobuf:"bytes,14,rep,name=object_annotations,json=objectAnnotations,proto3" json:"object_annotations,omitempty"`
// Annotations for list of logos detected, tracked and recognized in video.
LogoRecognitionAnnotations []*LogoRecognitionAnnotation `` /* 142-byte string literal not displayed */
// Person detection annotations.
PersonDetectionAnnotations []*PersonDetectionAnnotation `` /* 142-byte string literal not displayed */
// If set, indicates an error. Note that for a single `AnnotateVideoRequest`
// some videos may succeed and some may fail.
Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
// contains filtered or unexported fields
}
Annotation results for a single video.
func (*VideoAnnotationResults) Descriptor
deprecated
func (*VideoAnnotationResults) Descriptor() ([]byte, []int)
Deprecated: Use VideoAnnotationResults.ProtoReflect.Descriptor instead.
func (*VideoAnnotationResults) GetError ¶
func (x *VideoAnnotationResults) GetError() *status.Status
func (*VideoAnnotationResults) GetExplicitAnnotation ¶
func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation
func (*VideoAnnotationResults) GetFaceAnnotations
deprecated
func (x *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation
Deprecated: Marked as deprecated in google/cloud/videointelligence/v1/video_intelligence.proto.
func (*VideoAnnotationResults) GetFaceDetectionAnnotations ¶
func (x *VideoAnnotationResults) GetFaceDetectionAnnotations() []*FaceDetectionAnnotation
func (*VideoAnnotationResults) GetFrameLabelAnnotations ¶
func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetInputUri ¶
func (x *VideoAnnotationResults) GetInputUri() string
func (*VideoAnnotationResults) GetLogoRecognitionAnnotations ¶
func (x *VideoAnnotationResults) GetLogoRecognitionAnnotations() []*LogoRecognitionAnnotation
func (*VideoAnnotationResults) GetObjectAnnotations ¶
func (x *VideoAnnotationResults) GetObjectAnnotations() []*ObjectTrackingAnnotation
func (*VideoAnnotationResults) GetPersonDetectionAnnotations ¶
func (x *VideoAnnotationResults) GetPersonDetectionAnnotations() []*PersonDetectionAnnotation
func (*VideoAnnotationResults) GetSegment ¶
func (x *VideoAnnotationResults) GetSegment() *VideoSegment
func (*VideoAnnotationResults) GetSegmentLabelAnnotations ¶
func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetSegmentPresenceLabelAnnotations ¶
func (x *VideoAnnotationResults) GetSegmentPresenceLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetShotAnnotations ¶
func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment
func (*VideoAnnotationResults) GetShotLabelAnnotations ¶
func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetShotPresenceLabelAnnotations ¶
func (x *VideoAnnotationResults) GetShotPresenceLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetSpeechTranscriptions ¶
func (x *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription
func (*VideoAnnotationResults) GetTextAnnotations ¶
func (x *VideoAnnotationResults) GetTextAnnotations() []*TextAnnotation
func (*VideoAnnotationResults) ProtoMessage ¶
func (*VideoAnnotationResults) ProtoMessage()
func (*VideoAnnotationResults) ProtoReflect ¶
func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message
func (*VideoAnnotationResults) Reset ¶
func (x *VideoAnnotationResults) Reset()
func (*VideoAnnotationResults) String ¶
func (x *VideoAnnotationResults) String() string
type VideoContext ¶
// VideoContext carries the request-wide segment list plus one optional
// configuration message per annotation feature (label detection, shot
// change, explicit content, face, speech, text, person, object tracking).
// Each config field only applies when the corresponding Feature is
// requested. Generated protobuf code; struct tags are elided in this view.
type VideoContext struct {
// Video segments to annotate. The segments may overlap and are not required
// to be contiguous or span the whole video. If unspecified, each video is
// treated as a single segment.
Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
// Config for LABEL_DETECTION.
LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig,proto3" json:"label_detection_config,omitempty"`
// Config for SHOT_CHANGE_DETECTION.
ShotChangeDetectionConfig *ShotChangeDetectionConfig `` /* 140-byte string literal not displayed */
// Config for EXPLICIT_CONTENT_DETECTION.
ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `` /* 155-byte string literal not displayed */
// Config for FACE_DETECTION.
FaceDetectionConfig *FaceDetectionConfig `protobuf:"bytes,5,opt,name=face_detection_config,json=faceDetectionConfig,proto3" json:"face_detection_config,omitempty"`
// Config for SPEECH_TRANSCRIPTION.
SpeechTranscriptionConfig *SpeechTranscriptionConfig `` /* 138-byte string literal not displayed */
// Config for TEXT_DETECTION.
TextDetectionConfig *TextDetectionConfig `protobuf:"bytes,8,opt,name=text_detection_config,json=textDetectionConfig,proto3" json:"text_detection_config,omitempty"`
// Config for PERSON_DETECTION.
PersonDetectionConfig *PersonDetectionConfig `` /* 127-byte string literal not displayed */
// Config for OBJECT_TRACKING.
ObjectTrackingConfig *ObjectTrackingConfig `protobuf:"bytes,13,opt,name=object_tracking_config,json=objectTrackingConfig,proto3" json:"object_tracking_config,omitempty"`
// contains filtered or unexported fields
}
Video context and/or feature-specific parameters.
func (*VideoContext) Descriptor
deprecated
func (*VideoContext) Descriptor() ([]byte, []int)
Deprecated: Use VideoContext.ProtoReflect.Descriptor instead.
func (*VideoContext) GetExplicitContentDetectionConfig ¶
func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig
func (*VideoContext) GetFaceDetectionConfig ¶
func (x *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig
func (*VideoContext) GetLabelDetectionConfig ¶
func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig
func (*VideoContext) GetObjectTrackingConfig ¶
func (x *VideoContext) GetObjectTrackingConfig() *ObjectTrackingConfig
func (*VideoContext) GetPersonDetectionConfig ¶
func (x *VideoContext) GetPersonDetectionConfig() *PersonDetectionConfig
func (*VideoContext) GetSegments ¶
func (x *VideoContext) GetSegments() []*VideoSegment
func (*VideoContext) GetShotChangeDetectionConfig ¶
func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig
func (*VideoContext) GetSpeechTranscriptionConfig ¶
func (x *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig
func (*VideoContext) GetTextDetectionConfig ¶
func (x *VideoContext) GetTextDetectionConfig() *TextDetectionConfig
func (*VideoContext) ProtoMessage ¶
func (*VideoContext) ProtoMessage()
func (*VideoContext) ProtoReflect ¶
func (x *VideoContext) ProtoReflect() protoreflect.Message
func (*VideoContext) Reset ¶
func (x *VideoContext) Reset()
func (*VideoContext) String ¶
func (x *VideoContext) String() string
type VideoIntelligenceServiceClient ¶
type VideoIntelligenceServiceClient interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
}
VideoIntelligenceServiceClient is the client API for VideoIntelligenceService service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
func NewVideoIntelligenceServiceClient ¶
func NewVideoIntelligenceServiceClient(cc grpc.ClientConnInterface) VideoIntelligenceServiceClient
type VideoIntelligenceServiceServer ¶
type VideoIntelligenceServiceServer interface {
// Performs asynchronous video annotation. Progress and results can be
// retrieved through the `google.longrunning.Operations` interface.
// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
// `Operation.response` contains `AnnotateVideoResponse` (results).
AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunningpb.Operation, error)
}
VideoIntelligenceServiceServer is the server API for VideoIntelligenceService service. All implementations should embed UnimplementedVideoIntelligenceServiceServer for forward compatibility.
type VideoSegment ¶
// VideoSegment delimits a portion of a video by start and end time offsets,
// both expressed relative to the beginning of the video and both inclusive.
// Generated protobuf code.
type VideoSegment struct {
// Time-offset, relative to the beginning of the video,
// corresponding to the start of the segment (inclusive).
StartTimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset,proto3" json:"start_time_offset,omitempty"`
// Time-offset, relative to the beginning of the video,
// corresponding to the end of the segment (inclusive).
EndTimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset,proto3" json:"end_time_offset,omitempty"`
// contains filtered or unexported fields
}
Video segment.
func (*VideoSegment) Descriptor
deprecated
func (*VideoSegment) Descriptor() ([]byte, []int)
Deprecated: Use VideoSegment.ProtoReflect.Descriptor instead.
func (*VideoSegment) GetEndTimeOffset ¶
func (x *VideoSegment) GetEndTimeOffset() *durationpb.Duration
func (*VideoSegment) GetStartTimeOffset ¶
func (x *VideoSegment) GetStartTimeOffset() *durationpb.Duration
func (*VideoSegment) ProtoMessage ¶
func (*VideoSegment) ProtoMessage()
func (*VideoSegment) ProtoReflect ¶
func (x *VideoSegment) ProtoReflect() protoreflect.Message
func (*VideoSegment) Reset ¶
func (x *VideoSegment) Reset()
func (*VideoSegment) String ¶
func (x *VideoSegment) String() string
type WordInfo ¶
// WordInfo holds per-word details of a speech transcription: the word text,
// optional start/end time offsets (populated only when
// `enable_word_time_offsets=true`, and only for the top hypothesis),
// an output-only confidence score, and an output-only speaker tag that is
// set only when speaker diarization is enabled. Generated protobuf code.
type WordInfo struct {
// Time offset relative to the beginning of the audio, and
// corresponding to the start of the spoken word. This field is only set if
// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
// experimental feature and the accuracy of the time offset can vary.
StartTime *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Time offset relative to the beginning of the audio, and
// corresponding to the end of the spoken word. This field is only set if
// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
// experimental feature and the accuracy of the time offset can vary.
EndTime *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// The word corresponding to this set of information.
Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
// Output only. The confidence estimate between 0.0 and 1.0. A higher number
// indicates an estimated greater likelihood that the recognized words are
// correct. This field is set only for the top alternative.
// This field is not guaranteed to be accurate and users should not rely on it
// to be always provided.
// The default of 0.0 is a sentinel value indicating `confidence` was not set.
Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
// Output only. A distinct integer value is assigned for every speaker within
// the audio. This field specifies which one of those speakers was detected to
// have spoken this word. Value ranges from 1 up to diarization_speaker_count,
// and is only set if speaker diarization is enabled.
SpeakerTag int32 `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
// contains filtered or unexported fields
}
Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as `enable_word_time_offsets`.
func (*WordInfo) Descriptor
deprecated
func (*WordInfo) GetConfidence ¶
func (*WordInfo) GetEndTime ¶
func (x *WordInfo) GetEndTime() *durationpb.Duration
func (*WordInfo) GetSpeakerTag ¶
func (*WordInfo) GetStartTime ¶
func (x *WordInfo) GetStartTime() *durationpb.Duration
func (*WordInfo) ProtoMessage ¶
func (*WordInfo) ProtoMessage()
func (*WordInfo) ProtoReflect ¶
func (x *WordInfo) ProtoReflect() protoreflect.Message