Documentation

Index

Constants

This section is empty.

Variables

View Source
var (
	LabelDetectionMode_name = map[int32]string{
		0: "LABEL_DETECTION_MODE_UNSPECIFIED",
		1: "SHOT_MODE",
		2: "FRAME_MODE",
		3: "SHOT_AND_FRAME_MODE",
	}
	LabelDetectionMode_value = map[string]int32{
		"LABEL_DETECTION_MODE_UNSPECIFIED": 0,
		"SHOT_MODE":                        1,
		"FRAME_MODE":                       2,
		"SHOT_AND_FRAME_MODE":              3,
	}
)

Enum value maps for LabelDetectionMode.

View Source
var (
	Likelihood_name = map[int32]string{
		0: "LIKELIHOOD_UNSPECIFIED",
		1: "VERY_UNLIKELY",
		2: "UNLIKELY",
		3: "POSSIBLE",
		4: "LIKELY",
		5: "VERY_LIKELY",
	}
	Likelihood_value = map[string]int32{
		"LIKELIHOOD_UNSPECIFIED": 0,
		"VERY_UNLIKELY":          1,
		"UNLIKELY":               2,
		"POSSIBLE":               3,
		"LIKELY":                 4,
		"VERY_LIKELY":            5,
	}
)

Enum value maps for Likelihood.

View Source
var (
	StreamingFeature_name = map[int32]string{
		0:  "STREAMING_FEATURE_UNSPECIFIED",
		1:  "STREAMING_LABEL_DETECTION",
		2:  "STREAMING_SHOT_CHANGE_DETECTION",
		3:  "STREAMING_EXPLICIT_CONTENT_DETECTION",
		4:  "STREAMING_OBJECT_TRACKING",
		23: "STREAMING_AUTOML_ACTION_RECOGNITION",
		21: "STREAMING_AUTOML_CLASSIFICATION",
		22: "STREAMING_AUTOML_OBJECT_TRACKING",
	}
	StreamingFeature_value = map[string]int32{
		"STREAMING_FEATURE_UNSPECIFIED":        0,
		"STREAMING_LABEL_DETECTION":            1,
		"STREAMING_SHOT_CHANGE_DETECTION":      2,
		"STREAMING_EXPLICIT_CONTENT_DETECTION": 3,
		"STREAMING_OBJECT_TRACKING":            4,
		"STREAMING_AUTOML_ACTION_RECOGNITION":  23,
		"STREAMING_AUTOML_CLASSIFICATION":      21,
		"STREAMING_AUTOML_OBJECT_TRACKING":     22,
	}
)

Enum value maps for StreamingFeature.

View Source
var (
	Feature_name = map[int32]string{
		0:  "FEATURE_UNSPECIFIED",
		1:  "LABEL_DETECTION",
		2:  "SHOT_CHANGE_DETECTION",
		3:  "EXPLICIT_CONTENT_DETECTION",
		4:  "FACE_DETECTION",
		6:  "SPEECH_TRANSCRIPTION",
		7:  "TEXT_DETECTION",
		9:  "OBJECT_TRACKING",
		12: "LOGO_RECOGNITION",
		13: "CELEBRITY_RECOGNITION",
		14: "PERSON_DETECTION",
	}
	Feature_value = map[string]int32{
		"FEATURE_UNSPECIFIED":        0,
		"LABEL_DETECTION":            1,
		"SHOT_CHANGE_DETECTION":      2,
		"EXPLICIT_CONTENT_DETECTION": 3,
		"FACE_DETECTION":             4,
		"SPEECH_TRANSCRIPTION":       6,
		"TEXT_DETECTION":             7,
		"OBJECT_TRACKING":            9,
		"LOGO_RECOGNITION":           12,
		"CELEBRITY_RECOGNITION":      13,
		"PERSON_DETECTION":           14,
	}
)

Enum value maps for Feature.

View Source
var File_google_cloud_videointelligence_v1p3beta1_video_intelligence_proto protoreflect.FileDescriptor

Functions

func RegisterStreamingVideoIntelligenceServiceServer

func RegisterStreamingVideoIntelligenceServiceServer(s *grpc.Server, srv StreamingVideoIntelligenceServiceServer)

func RegisterVideoIntelligenceServiceServer

func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)

Types

type AnnotateVideoProgress

type AnnotateVideoProgress struct {

	// Progress metadata for all videos specified in `AnnotateVideoRequest`.
	AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress,proto3" json:"annotation_progress,omitempty"`
	// contains filtered or unexported fields
}

Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

func (*AnnotateVideoProgress) Descriptor

func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoProgress.ProtoReflect.Descriptor instead.

func (*AnnotateVideoProgress) GetAnnotationProgress

func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress

func (*AnnotateVideoProgress) ProtoMessage

func (*AnnotateVideoProgress) ProtoMessage()

func (*AnnotateVideoProgress) ProtoReflect

func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Message

func (*AnnotateVideoProgress) Reset

func (x *AnnotateVideoProgress) Reset()

func (*AnnotateVideoProgress) String

func (x *AnnotateVideoProgress) String() string

type AnnotateVideoRequest

type AnnotateVideoRequest struct {

	// Input video location. Currently, only
	// [Cloud Storage](https://cloud.google.com/storage/) URIs are
	// supported. URIs must be specified in the following format:
	// `gs://bucket-id/object-id` (other URI formats return
	// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
	// more information, see [Request
	// URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
	// multiple videos, a video URI may include wildcards in the `object-id`.
	// Supported wildcards: '*' to match 0 or more characters;
	// '?' to match 1 character. If unset, the input video should be embedded
	// in the request as `input_content`. If set, `input_content` must be unset.
	InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
	// The video data bytes.
	// If unset, the input video(s) should be specified via the `input_uri`.
	// If set, `input_uri` must be unset.
	InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"`
	// Required. Requested video annotation features.
	Features []Feature `` /* 131-byte string literal not displayed */
	// Additional video context and/or feature-specific parameters.
	VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext,proto3" json:"video_context,omitempty"`
	// Optional. Location where the output (in JSON format) should be stored.
	// Currently, only [Cloud Storage](https://cloud.google.com/storage/)
	// URIs are supported. These must be specified in the following format:
	// `gs://bucket-id/object-id` (other URI formats return
	// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
	// more information, see [Request
	// URIs](https://cloud.google.com/storage/docs/request-endpoints).
	OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
	// Optional. Cloud region where annotation should take place. Supported cloud
	// regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
	// region is specified, the region will be determined based on video file
	// location.
	LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
	// contains filtered or unexported fields
}

Video annotation request.

func (*AnnotateVideoRequest) Descriptor

func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoRequest.ProtoReflect.Descriptor instead.

func (*AnnotateVideoRequest) GetFeatures

func (x *AnnotateVideoRequest) GetFeatures() []Feature

func (*AnnotateVideoRequest) GetInputContent

func (x *AnnotateVideoRequest) GetInputContent() []byte

func (*AnnotateVideoRequest) GetInputUri

func (x *AnnotateVideoRequest) GetInputUri() string

func (*AnnotateVideoRequest) GetLocationId

func (x *AnnotateVideoRequest) GetLocationId() string

func (*AnnotateVideoRequest) GetOutputUri

func (x *AnnotateVideoRequest) GetOutputUri() string

func (*AnnotateVideoRequest) GetVideoContext

func (x *AnnotateVideoRequest) GetVideoContext() *VideoContext

func (*AnnotateVideoRequest) ProtoMessage

func (*AnnotateVideoRequest) ProtoMessage()

func (*AnnotateVideoRequest) ProtoReflect

func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Message

func (*AnnotateVideoRequest) Reset

func (x *AnnotateVideoRequest) Reset()

func (*AnnotateVideoRequest) String

func (x *AnnotateVideoRequest) String() string

type AnnotateVideoResponse

type AnnotateVideoResponse struct {

	// Annotation results for all videos specified in `AnnotateVideoRequest`.
	AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
	// contains filtered or unexported fields
}

Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

func (*AnnotateVideoResponse) Descriptor

func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoResponse.ProtoReflect.Descriptor instead.

func (*AnnotateVideoResponse) GetAnnotationResults

func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults

func (*AnnotateVideoResponse) ProtoMessage

func (*AnnotateVideoResponse) ProtoMessage()

func (*AnnotateVideoResponse) ProtoReflect

func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Message

func (*AnnotateVideoResponse) Reset

func (x *AnnotateVideoResponse) Reset()

func (*AnnotateVideoResponse) String

func (x *AnnotateVideoResponse) String() string

type Celebrity

type Celebrity struct {

	// The resource name of the celebrity. Has the format
	// `video-intelligence/kg-mid`, which indicates a celebrity from the
	// preloaded gallery. kg-mid is the id in Google knowledge graph, which is
	// unique for the celebrity.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The celebrity name.
	DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
	// Textual description of additional information about the celebrity, if
	// applicable.
	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
	// contains filtered or unexported fields
}

Celebrity definition.

func (*Celebrity) Descriptor

func (*Celebrity) Descriptor() ([]byte, []int)

Deprecated: Use Celebrity.ProtoReflect.Descriptor instead.

func (*Celebrity) GetDescription

func (x *Celebrity) GetDescription() string

func (*Celebrity) GetDisplayName

func (x *Celebrity) GetDisplayName() string

func (*Celebrity) GetName

func (x *Celebrity) GetName() string

func (*Celebrity) ProtoMessage

func (*Celebrity) ProtoMessage()

func (*Celebrity) ProtoReflect

func (x *Celebrity) ProtoReflect() protoreflect.Message

func (*Celebrity) Reset

func (x *Celebrity) Reset()

func (*Celebrity) String

func (x *Celebrity) String() string

type CelebrityRecognitionAnnotation

type CelebrityRecognitionAnnotation struct {

	// The tracks detected from the input video, including recognized celebrities
	// and other detected faces in the video.
	CelebrityTracks []*CelebrityTrack `protobuf:"bytes,1,rep,name=celebrity_tracks,json=celebrityTracks,proto3" json:"celebrity_tracks,omitempty"`
	// contains filtered or unexported fields
}

Celebrity recognition annotation per video.

func (*CelebrityRecognitionAnnotation) Descriptor

func (*CelebrityRecognitionAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use CelebrityRecognitionAnnotation.ProtoReflect.Descriptor instead.

func (*CelebrityRecognitionAnnotation) GetCelebrityTracks

func (x *CelebrityRecognitionAnnotation) GetCelebrityTracks() []*CelebrityTrack

func (*CelebrityRecognitionAnnotation) ProtoMessage

func (*CelebrityRecognitionAnnotation) ProtoMessage()

func (*CelebrityRecognitionAnnotation) ProtoReflect

func (*CelebrityRecognitionAnnotation) Reset

func (x *CelebrityRecognitionAnnotation) Reset()

func (*CelebrityRecognitionAnnotation) String

type CelebrityTrack

type CelebrityTrack struct {

	// Top N match of the celebrities for the face in this track.
	Celebrities []*CelebrityTrack_RecognizedCelebrity `protobuf:"bytes,1,rep,name=celebrities,proto3" json:"celebrities,omitempty"`
	// A track of a person's face.
	FaceTrack *Track `protobuf:"bytes,3,opt,name=face_track,json=faceTrack,proto3" json:"face_track,omitempty"`
	// contains filtered or unexported fields
}

The annotation result of a celebrity face track. RecognizedCelebrity field could be empty if the face track does not have any matched celebrities.

func (*CelebrityTrack) Descriptor

func (*CelebrityTrack) Descriptor() ([]byte, []int)

Deprecated: Use CelebrityTrack.ProtoReflect.Descriptor instead.

func (*CelebrityTrack) GetCelebrities

func (x *CelebrityTrack) GetCelebrities() []*CelebrityTrack_RecognizedCelebrity

func (*CelebrityTrack) GetFaceTrack

func (x *CelebrityTrack) GetFaceTrack() *Track

func (*CelebrityTrack) ProtoMessage

func (*CelebrityTrack) ProtoMessage()

func (*CelebrityTrack) ProtoReflect

func (x *CelebrityTrack) ProtoReflect() protoreflect.Message

func (*CelebrityTrack) Reset

func (x *CelebrityTrack) Reset()

func (*CelebrityTrack) String

func (x *CelebrityTrack) String() string

type CelebrityTrack_RecognizedCelebrity

type CelebrityTrack_RecognizedCelebrity struct {

	// The recognized celebrity.
	Celebrity *Celebrity `protobuf:"bytes,1,opt,name=celebrity,proto3" json:"celebrity,omitempty"`
	// Recognition confidence. Range [0, 1].
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// contains filtered or unexported fields
}

The recognized celebrity with confidence score.

func (*CelebrityTrack_RecognizedCelebrity) Descriptor

func (*CelebrityTrack_RecognizedCelebrity) Descriptor() ([]byte, []int)

Deprecated: Use CelebrityTrack_RecognizedCelebrity.ProtoReflect.Descriptor instead.

func (*CelebrityTrack_RecognizedCelebrity) GetCelebrity

func (x *CelebrityTrack_RecognizedCelebrity) GetCelebrity() *Celebrity

func (*CelebrityTrack_RecognizedCelebrity) GetConfidence

func (x *CelebrityTrack_RecognizedCelebrity) GetConfidence() float32

func (*CelebrityTrack_RecognizedCelebrity) ProtoMessage

func (*CelebrityTrack_RecognizedCelebrity) ProtoMessage()

func (*CelebrityTrack_RecognizedCelebrity) ProtoReflect

func (*CelebrityTrack_RecognizedCelebrity) Reset

func (*CelebrityTrack_RecognizedCelebrity) String

type DetectedAttribute

type DetectedAttribute struct {

	// The name of the attribute, for example, glasses, dark_glasses, mouth_open.
	// A full list of supported type names will be provided in the document.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Detected attribute confidence. Range [0, 1].
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Text value of the detection result. For example, the value for "HairColor"
	// can be "black", "blonde", etc.
	Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

A generic detected attribute represented by name in string format.

func (*DetectedAttribute) Descriptor

func (*DetectedAttribute) Descriptor() ([]byte, []int)

Deprecated: Use DetectedAttribute.ProtoReflect.Descriptor instead.

func (*DetectedAttribute) GetConfidence

func (x *DetectedAttribute) GetConfidence() float32

func (*DetectedAttribute) GetName

func (x *DetectedAttribute) GetName() string

func (*DetectedAttribute) GetValue

func (x *DetectedAttribute) GetValue() string

func (*DetectedAttribute) ProtoMessage

func (*DetectedAttribute) ProtoMessage()

func (*DetectedAttribute) ProtoReflect

func (x *DetectedAttribute) ProtoReflect() protoreflect.Message

func (*DetectedAttribute) Reset

func (x *DetectedAttribute) Reset()

func (*DetectedAttribute) String

func (x *DetectedAttribute) String() string

type DetectedLandmark

type DetectedLandmark struct {

	// The name of this landmark, for example, left_hand, right_shoulder.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The 2D point of the detected landmark using the normalized image
	// coordinate system. The normalized coordinates have the range from 0 to 1.
	Point *NormalizedVertex `protobuf:"bytes,2,opt,name=point,proto3" json:"point,omitempty"`
	// The confidence score of the detected landmark. Range [0, 1].
	Confidence float32 `protobuf:"fixed32,3,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// contains filtered or unexported fields
}

A generic detected landmark represented by name in string format and a 2D location.

func (*DetectedLandmark) Descriptor

func (*DetectedLandmark) Descriptor() ([]byte, []int)

Deprecated: Use DetectedLandmark.ProtoReflect.Descriptor instead.

func (*DetectedLandmark) GetConfidence

func (x *DetectedLandmark) GetConfidence() float32

func (*DetectedLandmark) GetName

func (x *DetectedLandmark) GetName() string

func (*DetectedLandmark) GetPoint

func (x *DetectedLandmark) GetPoint() *NormalizedVertex

func (*DetectedLandmark) ProtoMessage

func (*DetectedLandmark) ProtoMessage()

func (*DetectedLandmark) ProtoReflect

func (x *DetectedLandmark) ProtoReflect() protoreflect.Message

func (*DetectedLandmark) Reset

func (x *DetectedLandmark) Reset()

func (*DetectedLandmark) String

func (x *DetectedLandmark) String() string

type Entity

type Entity struct {

	// Opaque entity ID. Some IDs may be available in
	// [Google Knowledge Graph Search
	// API](https://developers.google.com/knowledge-graph/).
	EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
	// Textual description, e.g., `Fixed-gear bicycle`.
	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
	// Language code for `description` in BCP-47 format.
	LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// contains filtered or unexported fields
}

Detected entity from video analysis.

func (*Entity) Descriptor

func (*Entity) Descriptor() ([]byte, []int)

Deprecated: Use Entity.ProtoReflect.Descriptor instead.

func (*Entity) GetDescription

func (x *Entity) GetDescription() string

func (*Entity) GetEntityId

func (x *Entity) GetEntityId() string

func (*Entity) GetLanguageCode

func (x *Entity) GetLanguageCode() string

func (*Entity) ProtoMessage

func (*Entity) ProtoMessage()

func (*Entity) ProtoReflect

func (x *Entity) ProtoReflect() protoreflect.Message

func (*Entity) Reset

func (x *Entity) Reset()

func (*Entity) String

func (x *Entity) String() string

type ExplicitContentAnnotation

type ExplicitContentAnnotation struct {

	// All video frames where explicit content was detected.
	Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
	// contains filtered or unexported fields
}

Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.

func (*ExplicitContentAnnotation) Descriptor

func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentAnnotation.ProtoReflect.Descriptor instead.

func (*ExplicitContentAnnotation) GetFrames

func (*ExplicitContentAnnotation) ProtoMessage

func (*ExplicitContentAnnotation) ProtoMessage()

func (*ExplicitContentAnnotation) ProtoReflect

func (*ExplicitContentAnnotation) Reset

func (x *ExplicitContentAnnotation) Reset()

func (*ExplicitContentAnnotation) String

func (x *ExplicitContentAnnotation) String() string

type ExplicitContentDetectionConfig

type ExplicitContentDetectionConfig struct {

	// Model to use for explicit content detection.
	// Supported values: "builtin/stable" (the default if unset) and
	// "builtin/latest".
	Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
	// contains filtered or unexported fields
}

Config for EXPLICIT_CONTENT_DETECTION.

func (*ExplicitContentDetectionConfig) Descriptor

func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.

func (*ExplicitContentDetectionConfig) GetModel

func (x *ExplicitContentDetectionConfig) GetModel() string

func (*ExplicitContentDetectionConfig) ProtoMessage

func (*ExplicitContentDetectionConfig) ProtoMessage()

func (*ExplicitContentDetectionConfig) ProtoReflect

func (*ExplicitContentDetectionConfig) Reset

func (x *ExplicitContentDetectionConfig) Reset()

func (*ExplicitContentDetectionConfig) String

type ExplicitContentFrame

type ExplicitContentFrame struct {

	// Time-offset, relative to the beginning of the video, corresponding to the
	// video frame for this location.
	TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
	// Likelihood of the pornography content.
	PornographyLikelihood Likelihood `` /* 182-byte string literal not displayed */
	// contains filtered or unexported fields
}

Video frame level annotation results for explicit content.

func (*ExplicitContentFrame) Descriptor

func (*ExplicitContentFrame) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentFrame.ProtoReflect.Descriptor instead.

func (*ExplicitContentFrame) GetPornographyLikelihood

func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihood

func (*ExplicitContentFrame) GetTimeOffset

func (x *ExplicitContentFrame) GetTimeOffset() *durationpb.Duration

func (*ExplicitContentFrame) ProtoMessage

func (*ExplicitContentFrame) ProtoMessage()

func (*ExplicitContentFrame) ProtoReflect

func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Message

func (*ExplicitContentFrame) Reset

func (x *ExplicitContentFrame) Reset()

func (*ExplicitContentFrame) String

func (x *ExplicitContentFrame) String() string

type FaceDetectionAnnotation

type FaceDetectionAnnotation struct {

	// The face tracks with attributes.
	Tracks []*Track `protobuf:"bytes,3,rep,name=tracks,proto3" json:"tracks,omitempty"`
	// The thumbnail of a person's face.
	Thumbnail []byte `protobuf:"bytes,4,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
	// contains filtered or unexported fields
}

Face detection annotation.

func (*FaceDetectionAnnotation) Descriptor

func (*FaceDetectionAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use FaceDetectionAnnotation.ProtoReflect.Descriptor instead.

func (*FaceDetectionAnnotation) GetThumbnail

func (x *FaceDetectionAnnotation) GetThumbnail() []byte

func (*FaceDetectionAnnotation) GetTracks

func (x *FaceDetectionAnnotation) GetTracks() []*Track

func (*FaceDetectionAnnotation) ProtoMessage

func (*FaceDetectionAnnotation) ProtoMessage()

func (*FaceDetectionAnnotation) ProtoReflect

func (x *FaceDetectionAnnotation) ProtoReflect() protoreflect.Message

func (*FaceDetectionAnnotation) Reset

func (x *FaceDetectionAnnotation) Reset()

func (*FaceDetectionAnnotation) String

func (x *FaceDetectionAnnotation) String() string

type FaceDetectionConfig

type FaceDetectionConfig struct {

	// Model to use for face detection.
	// Supported values: "builtin/stable" (the default if unset) and
	// "builtin/latest".
	Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
	// Whether bounding boxes are included in the face annotation output.
	IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
	// Whether to enable face attributes detection, such as glasses, dark_glasses,
	// mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
	IncludeAttributes bool `protobuf:"varint,5,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
	// contains filtered or unexported fields
}

Config for FACE_DETECTION.

func (*FaceDetectionConfig) Descriptor

func (*FaceDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use FaceDetectionConfig.ProtoReflect.Descriptor instead.

func (*FaceDetectionConfig) GetIncludeAttributes

func (x *FaceDetectionConfig) GetIncludeAttributes() bool

func (*FaceDetectionConfig) GetIncludeBoundingBoxes

func (x *FaceDetectionConfig) GetIncludeBoundingBoxes() bool

func (*FaceDetectionConfig) GetModel

func (x *FaceDetectionConfig) GetModel() string

func (*FaceDetectionConfig) ProtoMessage

func (*FaceDetectionConfig) ProtoMessage()

func (*FaceDetectionConfig) ProtoReflect

func (x *FaceDetectionConfig) ProtoReflect() protoreflect.Message

func (*FaceDetectionConfig) Reset

func (x *FaceDetectionConfig) Reset()

func (*FaceDetectionConfig) String

func (x *FaceDetectionConfig) String() string

type Feature

type Feature int32

Video annotation feature.

const (
	// Unspecified.
	Feature_FEATURE_UNSPECIFIED Feature = 0
	// Label detection. Detect objects, such as dog or flower.
	Feature_LABEL_DETECTION Feature = 1
	// Shot change detection.
	Feature_SHOT_CHANGE_DETECTION Feature = 2
	// Explicit content detection.
	Feature_EXPLICIT_CONTENT_DETECTION Feature = 3
	// Human face detection.
	Feature_FACE_DETECTION Feature = 4
	// Speech transcription.
	Feature_SPEECH_TRANSCRIPTION Feature = 6
	// OCR text detection and tracking.
	Feature_TEXT_DETECTION Feature = 7
	// Object detection and tracking.
	Feature_OBJECT_TRACKING Feature = 9
	// Logo detection, tracking, and recognition.
	Feature_LOGO_RECOGNITION Feature = 12
	// Celebrity recognition.
	Feature_CELEBRITY_RECOGNITION Feature = 13
	// Person detection.
	Feature_PERSON_DETECTION Feature = 14
)

func (Feature) Descriptor

func (Feature) Descriptor() protoreflect.EnumDescriptor

func (Feature) Enum

func (x Feature) Enum() *Feature

func (Feature) EnumDescriptor

func (Feature) EnumDescriptor() ([]byte, []int)

Deprecated: Use Feature.Descriptor instead.

func (Feature) Number

func (x Feature) Number() protoreflect.EnumNumber

func (Feature) String

func (x Feature) String() string

func (Feature) Type

func (Feature) Type() protoreflect.EnumType

type LabelAnnotation

type LabelAnnotation struct {

	// Detected entity.
	Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
	// Common categories for the detected entity.
	// For example, when the label is `Terrier`, the category is likely `dog`. And
	// in some cases there might be more than one category, e.g., `Terrier` could
	// also be a `pet`.
	CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities,proto3" json:"category_entities,omitempty"`
	// All video segments where a label was detected.
	Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
	// All video frames where a label was detected.
	Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames,proto3" json:"frames,omitempty"`
	// contains filtered or unexported fields
}

Label annotation.

func (*LabelAnnotation) Descriptor

func (*LabelAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use LabelAnnotation.ProtoReflect.Descriptor instead.

func (*LabelAnnotation) GetCategoryEntities

func (x *LabelAnnotation) GetCategoryEntities() []*Entity

func (*LabelAnnotation) GetEntity

func (x *LabelAnnotation) GetEntity() *Entity

func (*LabelAnnotation) GetFrames

func (x *LabelAnnotation) GetFrames() []*LabelFrame

func (*LabelAnnotation) GetSegments

func (x *LabelAnnotation) GetSegments() []*LabelSegment

func (*LabelAnnotation) ProtoMessage

func (*LabelAnnotation) ProtoMessage()

func (*LabelAnnotation) ProtoReflect

func (x *LabelAnnotation) ProtoReflect() protoreflect.Message

func (*LabelAnnotation) Reset

func (x *LabelAnnotation) Reset()

func (*LabelAnnotation) String

func (x *LabelAnnotation) String() string

type LabelDetectionConfig

type LabelDetectionConfig struct {

	// What labels should be detected with LABEL_DETECTION, in addition to
	// video-level labels or segment-level labels.
	// If unspecified, defaults to `SHOT_MODE`.
	LabelDetectionMode LabelDetectionMode `` /* 183-byte string literal not displayed */
	// Whether the video has been shot from a stationary (i.e., non-moving)
	// camera. When set to true, might improve detection accuracy for moving
	// objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
	StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
	// Model to use for label detection.
	// Supported values: "builtin/stable" (the default if unset) and
	// "builtin/latest".
	Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
	// The confidence threshold we perform filtering on the labels from
	// frame-level detection. If not set, it is set to 0.4 by default. The valid
	// range for this threshold is [0.1, 0.9]. Any value set outside of this
	// range will be clipped.
	// Note: For best results, follow the default threshold. We will update
	// the default threshold every time we release a new model.
	FrameConfidenceThreshold float32 `` /* 137-byte string literal not displayed */
	// The confidence threshold we perform filtering on the labels from
	// video-level and shot-level detections. If not set, it's set to 0.3 by
	// default. The valid range for this threshold is [0.1, 0.9]. Any value set
	// outside of this range will be clipped.
	// Note: For best results, follow the default threshold. We will update
	// the default threshold every time we release a new model.
	VideoConfidenceThreshold float32 `` /* 137-byte string literal not displayed */
	// contains filtered or unexported fields
}

Config for LABEL_DETECTION.

func (*LabelDetectionConfig) Descriptor

func (*LabelDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use LabelDetectionConfig.ProtoReflect.Descriptor instead.

func (*LabelDetectionConfig) GetFrameConfidenceThreshold

func (x *LabelDetectionConfig) GetFrameConfidenceThreshold() float32

func (*LabelDetectionConfig) GetLabelDetectionMode

func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode

func (*LabelDetectionConfig) GetModel

func (x *LabelDetectionConfig) GetModel() string

func (*LabelDetectionConfig) GetStationaryCamera

func (x *LabelDetectionConfig) GetStationaryCamera() bool

func (*LabelDetectionConfig) GetVideoConfidenceThreshold

func (x *LabelDetectionConfig) GetVideoConfidenceThreshold() float32

func (*LabelDetectionConfig) ProtoMessage

func (*LabelDetectionConfig) ProtoMessage()

func (*LabelDetectionConfig) ProtoReflect

func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Message

func (*LabelDetectionConfig) Reset

func (x *LabelDetectionConfig) Reset()

func (*LabelDetectionConfig) String

func (x *LabelDetectionConfig) String() string

type LabelDetectionMode

type LabelDetectionMode int32

Label detection mode.

const (
	// Unspecified.
	LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0
	// Detect shot-level labels.
	LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1
	// Detect frame-level labels.
	LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2
	// Detect both shot-level and frame-level labels.
	LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3
)

func (LabelDetectionMode) Descriptor

func (LabelDetectionMode) Enum

func (LabelDetectionMode) EnumDescriptor

func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use LabelDetectionMode.Descriptor instead.

func (LabelDetectionMode) Number

func (LabelDetectionMode) String

func (x LabelDetectionMode) String() string

func (LabelDetectionMode) Type

type LabelFrame

type LabelFrame struct {

	// Time-offset, relative to the beginning of the video, corresponding to the
	// video frame for this location.
	TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
	// Confidence that the label is accurate. Range: [0, 1].
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// contains filtered or unexported fields
}

Video frame level annotation results for label detection.

func (*LabelFrame) Descriptor

func (*LabelFrame) Descriptor() ([]byte, []int)

Deprecated: Use LabelFrame.ProtoReflect.Descriptor instead.

func (*LabelFrame) GetConfidence

func (x *LabelFrame) GetConfidence() float32

func (*LabelFrame) GetTimeOffset

func (x *LabelFrame) GetTimeOffset() *durationpb.Duration

func (*LabelFrame) ProtoMessage

func (*LabelFrame) ProtoMessage()

func (*LabelFrame) ProtoReflect

func (x *LabelFrame) ProtoReflect() protoreflect.Message

func (*LabelFrame) Reset

func (x *LabelFrame) Reset()

func (*LabelFrame) String

func (x *LabelFrame) String() string

type LabelSegment

type LabelSegment struct {

	// Video segment where a label was detected.
	Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
	// Confidence that the label is accurate. Range: [0, 1].
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// contains filtered or unexported fields
}

Video segment level annotation results for label detection.

func (*LabelSegment) Descriptor

func (*LabelSegment) Descriptor() ([]byte, []int)

Deprecated: Use LabelSegment.ProtoReflect.Descriptor instead.

func (*LabelSegment) GetConfidence

func (x *LabelSegment) GetConfidence() float32

func (*LabelSegment) GetSegment

func (x *LabelSegment) GetSegment() *VideoSegment

func (*LabelSegment) ProtoMessage

func (*LabelSegment) ProtoMessage()

func (*LabelSegment) ProtoReflect

func (x *LabelSegment) ProtoReflect() protoreflect.Message

func (*LabelSegment) Reset

func (x *LabelSegment) Reset()

func (*LabelSegment) String

func (x *LabelSegment) String() string

type Likelihood

type Likelihood int32

Bucketized representation of likelihood.

const (
	// Unspecified likelihood.
	Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0
	// Very unlikely.
	Likelihood_VERY_UNLIKELY Likelihood = 1
	// Unlikely.
	Likelihood_UNLIKELY Likelihood = 2
	// Possible.
	Likelihood_POSSIBLE Likelihood = 3
	// Likely.
	Likelihood_LIKELY Likelihood = 4
	// Very likely.
	Likelihood_VERY_LIKELY Likelihood = 5
)

func (Likelihood) Descriptor

func (Likelihood) Descriptor() protoreflect.EnumDescriptor

func (Likelihood) Enum

func (x Likelihood) Enum() *Likelihood

func (Likelihood) EnumDescriptor

func (Likelihood) EnumDescriptor() ([]byte, []int)

Deprecated: Use Likelihood.Descriptor instead.

func (Likelihood) Number

func (x Likelihood) Number() protoreflect.EnumNumber

func (Likelihood) String

func (x Likelihood) String() string

func (Likelihood) Type

type LogoRecognitionAnnotation

type LogoRecognitionAnnotation struct {

	// Entity category information to specify the logo class that all the logo
	// tracks within this LogoRecognitionAnnotation are recognized as.
	Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
	// All logo tracks where the recognized logo appears. Each track corresponds
	// to one logo instance appearing in consecutive frames.
	Tracks []*Track `protobuf:"bytes,2,rep,name=tracks,proto3" json:"tracks,omitempty"`
	// All video segments where the recognized logo appears. There might be
	// multiple instances of the same logo class appearing in one VideoSegment.
	Segments []*VideoSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
	// contains filtered or unexported fields
}

Annotation corresponding to one detected, tracked and recognized logo class.

func (*LogoRecognitionAnnotation) Descriptor

func (*LogoRecognitionAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use LogoRecognitionAnnotation.ProtoReflect.Descriptor instead.

func (*LogoRecognitionAnnotation) GetEntity

func (x *LogoRecognitionAnnotation) GetEntity() *Entity

func (*LogoRecognitionAnnotation) GetSegments

func (x *LogoRecognitionAnnotation) GetSegments() []*VideoSegment

func (*LogoRecognitionAnnotation) GetTracks

func (x *LogoRecognitionAnnotation) GetTracks() []*Track

func (*LogoRecognitionAnnotation) ProtoMessage

func (*LogoRecognitionAnnotation) ProtoMessage()

func (*LogoRecognitionAnnotation) ProtoReflect

func (*LogoRecognitionAnnotation) Reset

func (x *LogoRecognitionAnnotation) Reset()

func (*LogoRecognitionAnnotation) String

func (x *LogoRecognitionAnnotation) String() string

type NormalizedBoundingBox

type NormalizedBoundingBox struct {

	// Left X coordinate.
	Left float32 `protobuf:"fixed32,1,opt,name=left,proto3" json:"left,omitempty"`
	// Top Y coordinate.
	Top float32 `protobuf:"fixed32,2,opt,name=top,proto3" json:"top,omitempty"`
	// Right X coordinate.
	Right float32 `protobuf:"fixed32,3,opt,name=right,proto3" json:"right,omitempty"`
	// Bottom Y coordinate.
	Bottom float32 `protobuf:"fixed32,4,opt,name=bottom,proto3" json:"bottom,omitempty"`
	// contains filtered or unexported fields
}

Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].

func (*NormalizedBoundingBox) Descriptor

func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)

Deprecated: Use NormalizedBoundingBox.ProtoReflect.Descriptor instead.

func (*NormalizedBoundingBox) GetBottom

func (x *NormalizedBoundingBox) GetBottom() float32

func (*NormalizedBoundingBox) GetLeft

func (x *NormalizedBoundingBox) GetLeft() float32

func (*NormalizedBoundingBox) GetRight

func (x *NormalizedBoundingBox) GetRight() float32

func (*NormalizedBoundingBox) GetTop

func (x *NormalizedBoundingBox) GetTop() float32

func (*NormalizedBoundingBox) ProtoMessage

func (*NormalizedBoundingBox) ProtoMessage()

func (*NormalizedBoundingBox) ProtoReflect

func (x *NormalizedBoundingBox) ProtoReflect() protoreflect.Message

func (*NormalizedBoundingBox) Reset

func (x *NormalizedBoundingBox) Reset()

func (*NormalizedBoundingBox) String

func (x *NormalizedBoundingBox) String() string

type NormalizedBoundingPoly

type NormalizedBoundingPoly struct {

	// Normalized vertices of the bounding polygon.
	Vertices []*NormalizedVertex `protobuf:"bytes,1,rep,name=vertices,proto3" json:"vertices,omitempty"`
	// contains filtered or unexported fields
}

Normalized bounding polygon for text (that might not be aligned with axis). Contains list of the corner points in clockwise order starting from top-left corner. For example, for a rectangular bounding box: When the text is horizontal it might look like:

0----1
|    |
3----2

When it's clockwise rotated 180 degrees around the top-left corner it becomes:

2----3
|    |
1----0

and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trigonometric calculations for location of the box.

func (*NormalizedBoundingPoly) Descriptor

func (*NormalizedBoundingPoly) Descriptor() ([]byte, []int)

Deprecated: Use NormalizedBoundingPoly.ProtoReflect.Descriptor instead.

func (*NormalizedBoundingPoly) GetVertices

func (x *NormalizedBoundingPoly) GetVertices() []*NormalizedVertex

func (*NormalizedBoundingPoly) ProtoMessage

func (*NormalizedBoundingPoly) ProtoMessage()

func (*NormalizedBoundingPoly) ProtoReflect

func (x *NormalizedBoundingPoly) ProtoReflect() protoreflect.Message

func (*NormalizedBoundingPoly) Reset

func (x *NormalizedBoundingPoly) Reset()

func (*NormalizedBoundingPoly) String

func (x *NormalizedBoundingPoly) String() string

type NormalizedVertex

type NormalizedVertex struct {

	// X coordinate.
	X float32 `protobuf:"fixed32,1,opt,name=x,proto3" json:"x,omitempty"`
	// Y coordinate.
	Y float32 `protobuf:"fixed32,2,opt,name=y,proto3" json:"y,omitempty"`
	// contains filtered or unexported fields
}

A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1.

func (*NormalizedVertex) Descriptor

func (*NormalizedVertex) Descriptor() ([]byte, []int)

Deprecated: Use NormalizedVertex.ProtoReflect.Descriptor instead.

func (*NormalizedVertex) GetX

func (x *NormalizedVertex) GetX() float32

func (*NormalizedVertex) GetY

func (x *NormalizedVertex) GetY() float32

func (*NormalizedVertex) ProtoMessage

func (*NormalizedVertex) ProtoMessage()

func (*NormalizedVertex) ProtoReflect

func (x *NormalizedVertex) ProtoReflect() protoreflect.Message

func (*NormalizedVertex) Reset

func (x *NormalizedVertex) Reset()

func (*NormalizedVertex) String

func (x *NormalizedVertex) String() string

type ObjectTrackingAnnotation

type ObjectTrackingAnnotation struct {

	// Different representation of tracking info in non-streaming batch
	// and streaming modes.
	//
	// Types that are assignable to TrackInfo:
	//	*ObjectTrackingAnnotation_Segment
	//	*ObjectTrackingAnnotation_TrackId
	TrackInfo isObjectTrackingAnnotation_TrackInfo `protobuf_oneof:"track_info"`
	// Entity to specify the object category that this track is labeled as.
	Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
	// Object category's labeling confidence of this track.
	Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Information corresponding to all frames where this object track appears.
	// Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
	// messages in frames.
	// Streaming mode: it can only be one ObjectTrackingFrame message in frames.
	Frames []*ObjectTrackingFrame `protobuf:"bytes,2,rep,name=frames,proto3" json:"frames,omitempty"`
	// contains filtered or unexported fields
}

Annotations corresponding to one tracked object.

func (*ObjectTrackingAnnotation) Descriptor

func (*ObjectTrackingAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use ObjectTrackingAnnotation.ProtoReflect.Descriptor instead.

func (*ObjectTrackingAnnotation) GetConfidence

func (x *ObjectTrackingAnnotation) GetConfidence() float32

func (*ObjectTrackingAnnotation) GetEntity

func (x *ObjectTrackingAnnotation) GetEntity() *Entity

func (*ObjectTrackingAnnotation) GetFrames

func (*ObjectTrackingAnnotation) GetSegment

func (x *ObjectTrackingAnnotation) GetSegment() *VideoSegment

func (*ObjectTrackingAnnotation) GetTrackId

func (x *ObjectTrackingAnnotation) GetTrackId() int64

func (*ObjectTrackingAnnotation) GetTrackInfo

func (m *ObjectTrackingAnnotation) GetTrackInfo() isObjectTrackingAnnotation_TrackInfo

func (*ObjectTrackingAnnotation) ProtoMessage

func (*ObjectTrackingAnnotation) ProtoMessage()

func (*ObjectTrackingAnnotation) ProtoReflect

func (x *ObjectTrackingAnnotation) ProtoReflect() protoreflect.Message

func (*ObjectTrackingAnnotation) Reset

func (x *ObjectTrackingAnnotation) Reset()

func (*ObjectTrackingAnnotation) String

func (x *ObjectTrackingAnnotation) String() string

type ObjectTrackingAnnotation_Segment

type ObjectTrackingAnnotation_Segment struct {
	// Non-streaming batch mode ONLY.
	// Each object track corresponds to one video segment where it appears.
	Segment *VideoSegment `protobuf:"bytes,3,opt,name=segment,proto3,oneof"`
}

type ObjectTrackingAnnotation_TrackId

type ObjectTrackingAnnotation_TrackId struct {
	// Streaming mode ONLY.
	// In streaming mode, we do not know the end time of a tracked object
	// before it is completed. Hence, there is no VideoSegment info returned.
	// Instead, we provide a unique identifiable integer track_id so that
	// the customers can correlate the results of the ongoing
	// ObjectTrackingAnnotation of the same track_id over time.
	TrackId int64 `protobuf:"varint,5,opt,name=track_id,json=trackId,proto3,oneof"`
}

type ObjectTrackingConfig

type ObjectTrackingConfig struct {

	// Model to use for object tracking.
	// Supported values: "builtin/stable" (the default if unset) and
	// "builtin/latest".
	Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
	// contains filtered or unexported fields
}

Config for OBJECT_TRACKING.

func (*ObjectTrackingConfig) Descriptor

func (*ObjectTrackingConfig) Descriptor() ([]byte, []int)

Deprecated: Use ObjectTrackingConfig.ProtoReflect.Descriptor instead.

func (*ObjectTrackingConfig) GetModel

func (x *ObjectTrackingConfig) GetModel() string

func (*ObjectTrackingConfig) ProtoMessage

func (*ObjectTrackingConfig) ProtoMessage()

func (*ObjectTrackingConfig) ProtoReflect

func (x *ObjectTrackingConfig) ProtoReflect() protoreflect.Message

func (*ObjectTrackingConfig) Reset

func (x *ObjectTrackingConfig) Reset()

func (*ObjectTrackingConfig) String

func (x *ObjectTrackingConfig) String() string

type ObjectTrackingFrame

type ObjectTrackingFrame struct {

	// The normalized bounding box location of this object track for the frame.
	NormalizedBoundingBox *NormalizedBoundingBox `` /* 126-byte string literal not displayed */
	// The timestamp of the frame in microseconds.
	TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
	// contains filtered or unexported fields
}

Video frame level annotations for object detection and tracking. This field stores per frame location, time offset, and confidence.

func (*ObjectTrackingFrame) Descriptor

func (*ObjectTrackingFrame) Descriptor() ([]byte, []int)

Deprecated: Use ObjectTrackingFrame.ProtoReflect.Descriptor instead.

func (*ObjectTrackingFrame) GetNormalizedBoundingBox

func (x *ObjectTrackingFrame) GetNormalizedBoundingBox() *NormalizedBoundingBox

func (*ObjectTrackingFrame) GetTimeOffset

func (x *ObjectTrackingFrame) GetTimeOffset() *durationpb.Duration

func (*ObjectTrackingFrame) ProtoMessage

func (*ObjectTrackingFrame) ProtoMessage()

func (*ObjectTrackingFrame) ProtoReflect

func (x *ObjectTrackingFrame) ProtoReflect() protoreflect.Message

func (*ObjectTrackingFrame) Reset

func (x *ObjectTrackingFrame) Reset()

func (*ObjectTrackingFrame) String

func (x *ObjectTrackingFrame) String() string

type PersonDetectionAnnotation

type PersonDetectionAnnotation struct {

	// The detected tracks of a person.
	Tracks []*Track `protobuf:"bytes,1,rep,name=tracks,proto3" json:"tracks,omitempty"`
	// contains filtered or unexported fields
}

Person detection annotation per video.

func (*PersonDetectionAnnotation) Descriptor

func (*PersonDetectionAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use PersonDetectionAnnotation.ProtoReflect.Descriptor instead.

func (*PersonDetectionAnnotation) GetTracks

func (x *PersonDetectionAnnotation) GetTracks() []*Track

func (*PersonDetectionAnnotation) ProtoMessage

func (*PersonDetectionAnnotation) ProtoMessage()

func (*PersonDetectionAnnotation) ProtoReflect

func (*PersonDetectionAnnotation) Reset

func (x *PersonDetectionAnnotation) Reset()

func (*PersonDetectionAnnotation) String

func (x *PersonDetectionAnnotation) String() string

type PersonDetectionConfig

type PersonDetectionConfig struct {

	// Whether bounding boxes are included in the person detection annotation
	// output.
	IncludeBoundingBoxes bool `protobuf:"varint,1,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
	// Whether to enable pose landmarks detection. Ignored if
	// 'include_bounding_boxes' is set to false.
	IncludePoseLandmarks bool `protobuf:"varint,2,opt,name=include_pose_landmarks,json=includePoseLandmarks,proto3" json:"include_pose_landmarks,omitempty"`
	// Whether to enable person attributes detection, such as cloth color (black,
	// blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
	// etc.
	// Ignored if 'include_bounding_boxes' is set to false.
	IncludeAttributes bool `protobuf:"varint,3,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
	// contains filtered or unexported fields
}

Config for PERSON_DETECTION.

func (*PersonDetectionConfig) Descriptor

func (*PersonDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use PersonDetectionConfig.ProtoReflect.Descriptor instead.

func (*PersonDetectionConfig) GetIncludeAttributes

func (x *PersonDetectionConfig) GetIncludeAttributes() bool

func (*PersonDetectionConfig) GetIncludeBoundingBoxes

func (x *PersonDetectionConfig) GetIncludeBoundingBoxes() bool

func (*PersonDetectionConfig) GetIncludePoseLandmarks

func (x *PersonDetectionConfig) GetIncludePoseLandmarks() bool

func (*PersonDetectionConfig) ProtoMessage

func (*PersonDetectionConfig) ProtoMessage()

func (*PersonDetectionConfig) ProtoReflect

func (x *PersonDetectionConfig) ProtoReflect() protoreflect.Message

func (*PersonDetectionConfig) Reset

func (x *PersonDetectionConfig) Reset()

func (*PersonDetectionConfig) String

func (x *PersonDetectionConfig) String() string

type ShotChangeDetectionConfig

type ShotChangeDetectionConfig struct {

	// Model to use for shot change detection.
	// Supported values: "builtin/stable" (the default if unset) and
	// "builtin/latest".
	Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
	// contains filtered or unexported fields
}

Config for SHOT_CHANGE_DETECTION.

func (*ShotChangeDetectionConfig) Descriptor

func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use ShotChangeDetectionConfig.ProtoReflect.Descriptor instead.

func (*ShotChangeDetectionConfig) GetModel

func (x *ShotChangeDetectionConfig) GetModel() string

func (*ShotChangeDetectionConfig) ProtoMessage

func (*ShotChangeDetectionConfig) ProtoMessage()

func (*ShotChangeDetectionConfig) ProtoReflect

func (*ShotChangeDetectionConfig) Reset

func (x *ShotChangeDetectionConfig) Reset()

func (*ShotChangeDetectionConfig) String

func (x *ShotChangeDetectionConfig) String() string

type SpeechContext

type SpeechContext struct {

	// Optional. A list of strings containing words and phrases "hints" so that
	// the speech recognition is more likely to recognize them. This can be used
	// to improve the accuracy for specific words and phrases, for example, if
	// specific commands are typically spoken by the user. This can also be used
	// to add additional words to the vocabulary of the recognizer. See
	// [usage limits](https://cloud.google.com/speech/limits#content).
	Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// contains filtered or unexported fields
}

Provides "hints" to the speech recognizer to favor specific words and phrases in the results.

func (*SpeechContext) Descriptor

func (*SpeechContext) Descriptor() ([]byte, []int)

Deprecated: Use SpeechContext.ProtoReflect.Descriptor instead.

func (*SpeechContext) GetPhrases

func (x *SpeechContext) GetPhrases() []string

func (*SpeechContext) ProtoMessage

func (*SpeechContext) ProtoMessage()

func (*SpeechContext) ProtoReflect

func (x *SpeechContext) ProtoReflect() protoreflect.Message

func (*SpeechContext) Reset

func (x *SpeechContext) Reset()

func (*SpeechContext) String

func (x *SpeechContext) String() string

type SpeechRecognitionAlternative

type SpeechRecognitionAlternative struct {

	// Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// Output only. The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is set to true, you will see all
	// the words from the beginning of the audio.
	Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
	// contains filtered or unexported fields
}

Alternative hypotheses (a.k.a. n-best list).

func (*SpeechRecognitionAlternative) Descriptor

func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)

Deprecated: Use SpeechRecognitionAlternative.ProtoReflect.Descriptor instead.

func (*SpeechRecognitionAlternative) GetConfidence

func (x *SpeechRecognitionAlternative) GetConfidence() float32

func (*SpeechRecognitionAlternative) GetTranscript

func (x *SpeechRecognitionAlternative) GetTranscript() string

func (*SpeechRecognitionAlternative) GetWords

func (x *SpeechRecognitionAlternative) GetWords() []*WordInfo

func (*SpeechRecognitionAlternative) ProtoMessage

func (*SpeechRecognitionAlternative) ProtoMessage()

func (*SpeechRecognitionAlternative) ProtoReflect

func (*SpeechRecognitionAlternative) Reset

func (x *SpeechRecognitionAlternative) Reset()

func (*SpeechRecognitionAlternative) String

type SpeechTranscription

type SpeechTranscription struct {

	// May contain one or more recognition hypotheses (up to the maximum specified
	// in `max_alternatives`).  These alternatives are ordered in terms of
	// accuracy, with the top (first) alternative being the most probable, as
	// ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
	// language tag of the language in this result. This language code was
	// detected to have the most likelihood of being spoken in the audio.
	LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// contains filtered or unexported fields
}

A speech recognition result corresponding to a portion of the audio.

func (*SpeechTranscription) Descriptor

func (*SpeechTranscription) Descriptor() ([]byte, []int)

Deprecated: Use SpeechTranscription.ProtoReflect.Descriptor instead.

func (*SpeechTranscription) GetAlternatives

func (x *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative

func (*SpeechTranscription) GetLanguageCode

func (x *SpeechTranscription) GetLanguageCode() string

func (*SpeechTranscription) ProtoMessage

func (*SpeechTranscription) ProtoMessage()

func (*SpeechTranscription) ProtoReflect

func (x *SpeechTranscription) ProtoReflect() protoreflect.Message

func (*SpeechTranscription) Reset

func (x *SpeechTranscription) Reset()

func (*SpeechTranscription) String

func (x *SpeechTranscription) String() string

type SpeechTranscriptionConfig

type SpeechTranscriptionConfig struct {

	// Required. The language of the supplied audio as a
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
	// Example: "en-US".
	// See [Language Support](https://cloud.google.com/speech/docs/languages)
	// for a list of the currently supported language codes.
	LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// Optional. Maximum number of recognition hypotheses to be returned.
	// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
	// within each `SpeechTranscription`. The server may return fewer than
	// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
	// return a maximum of one. If omitted, will return a maximum of one.
	MaxAlternatives int32 `protobuf:"varint,2,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
	// Optional. If set to `true`, the server will attempt to filter out
	// profanities, replacing all but the initial character in each filtered word
	// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
	// won't be filtered out.
	FilterProfanity bool `protobuf:"varint,3,opt,name=filter_profanity,json=filterProfanity,proto3" json:"filter_profanity,omitempty"`
	// Optional. A means to provide context to assist the speech recognition.
	SpeechContexts []*SpeechContext `protobuf:"bytes,4,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
	// Optional. If 'true', adds punctuation to recognition result hypotheses.
	// This feature is only available in select languages. Setting this for
	// requests in other languages has no effect at all. The default 'false' value
	// does not add punctuation to result hypotheses. NOTE: "This is currently
	// offered as an experimental service, complimentary to all users. In the
	// future this may be exclusively available as a premium feature."
	EnableAutomaticPunctuation bool `` /* 142-byte string literal not displayed */
	// Optional. For file formats, such as MXF or MKV, supporting multiple audio
	// tracks, specify up to two tracks. Default: track 0.
	AudioTracks []int32 `protobuf:"varint,6,rep,packed,name=audio_tracks,json=audioTracks,proto3" json:"audio_tracks,omitempty"`
	// Optional. If 'true', enables speaker detection for each recognized word in
	// the top alternative of the recognition result using a speaker_tag provided
	// in the WordInfo.
	// Note: When this is true, we send all the words from the beginning of the
	// audio for the top alternative in every consecutive response.
	// This is done in order to improve our speaker tags as our models learn to
	// identify the speakers in the conversation over time.
	EnableSpeakerDiarization bool `` /* 136-byte string literal not displayed */
	// Optional. If set, specifies the estimated number of speakers in the
	// conversation. If not set, defaults to '2'. Ignored unless
	// enable_speaker_diarization is set to true.
	DiarizationSpeakerCount int32 `` /* 133-byte string literal not displayed */
	// Optional. If `true`, the top result includes a list of words and the
	// confidence for those words. If `false`, no word-level confidence
	// information is returned. The default is `false`.
	EnableWordConfidence bool `protobuf:"varint,9,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
	// contains filtered or unexported fields
}

Config for SPEECH_TRANSCRIPTION.

func (*SpeechTranscriptionConfig) Descriptor

func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int)

Deprecated: Use SpeechTranscriptionConfig.ProtoReflect.Descriptor instead.

func (*SpeechTranscriptionConfig) GetAudioTracks

func (x *SpeechTranscriptionConfig) GetAudioTracks() []int32

func (*SpeechTranscriptionConfig) GetDiarizationSpeakerCount

func (x *SpeechTranscriptionConfig) GetDiarizationSpeakerCount() int32

func (*SpeechTranscriptionConfig) GetEnableAutomaticPunctuation

func (x *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() bool

func (*SpeechTranscriptionConfig) GetEnableSpeakerDiarization

func (x *SpeechTranscriptionConfig) GetEnableSpeakerDiarization() bool

func (*SpeechTranscriptionConfig) GetEnableWordConfidence

func (x *SpeechTranscriptionConfig) GetEnableWordConfidence() bool

func (*SpeechTranscriptionConfig) GetFilterProfanity

func (x *SpeechTranscriptionConfig) GetFilterProfanity() bool

func (*SpeechTranscriptionConfig) GetLanguageCode

func (x *SpeechTranscriptionConfig) GetLanguageCode() string

func (*SpeechTranscriptionConfig) GetMaxAlternatives

func (x *SpeechTranscriptionConfig) GetMaxAlternatives() int32

func (*SpeechTranscriptionConfig) GetSpeechContexts

func (x *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext

func (*SpeechTranscriptionConfig) ProtoMessage

func (*SpeechTranscriptionConfig) ProtoMessage()

func (*SpeechTranscriptionConfig) ProtoReflect

func (*SpeechTranscriptionConfig) Reset

func (x *SpeechTranscriptionConfig) Reset()

func (*SpeechTranscriptionConfig) String

func (x *SpeechTranscriptionConfig) String() string

type StreamingAnnotateVideoRequest

type StreamingAnnotateVideoRequest struct {

	// *Required* The streaming request, which is either a streaming config or
	// video content.
	//
	// Types that are assignable to StreamingRequest:
	//	*StreamingAnnotateVideoRequest_VideoConfig
	//	*StreamingAnnotateVideoRequest_InputContent
	StreamingRequest isStreamingAnnotateVideoRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `StreamingAnnotateVideo` method. Multiple `StreamingAnnotateVideoRequest` messages are sent. The first message must only contain a `StreamingVideoConfig` message. All subsequent messages must only contain `input_content` data.

func (*StreamingAnnotateVideoRequest) Descriptor

func (*StreamingAnnotateVideoRequest) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAnnotateVideoRequest.ProtoReflect.Descriptor instead.

func (*StreamingAnnotateVideoRequest) GetInputContent

func (x *StreamingAnnotateVideoRequest) GetInputContent() []byte

func (*StreamingAnnotateVideoRequest) GetStreamingRequest

func (m *StreamingAnnotateVideoRequest) GetStreamingRequest() isStreamingAnnotateVideoRequest_StreamingRequest

func (*StreamingAnnotateVideoRequest) GetVideoConfig

func (*StreamingAnnotateVideoRequest) ProtoMessage

func (*StreamingAnnotateVideoRequest) ProtoMessage()

func (*StreamingAnnotateVideoRequest) ProtoReflect

func (*StreamingAnnotateVideoRequest) Reset

func (x *StreamingAnnotateVideoRequest) Reset()

func (*StreamingAnnotateVideoRequest) String

type StreamingAnnotateVideoRequest_InputContent

type StreamingAnnotateVideoRequest_InputContent struct {
	// The video data to be annotated. Chunks of video data are sequentially
	// sent in `StreamingAnnotateVideoRequest` messages. Except the initial
	// `StreamingAnnotateVideoRequest` message containing only
	// `video_config`, all subsequent `AnnotateStreamingVideoRequest`
	// messages must only contain `input_content` field.
	// Note: as with all bytes fields, protobuffers use a pure binary
	// representation (not base64).
	InputContent []byte `protobuf:"bytes,2,opt,name=input_content,json=inputContent,proto3,oneof"`
}

type StreamingAnnotateVideoRequest_VideoConfig

type StreamingAnnotateVideoRequest_VideoConfig struct {
	// Provides information to the annotator, specifying how to process the
	// request. The first `AnnotateStreamingVideoRequest` message must only
	// contain a `video_config` message.
	VideoConfig *StreamingVideoConfig `protobuf:"bytes,1,opt,name=video_config,json=videoConfig,proto3,oneof"`
}

type StreamingAnnotateVideoResponse

type StreamingAnnotateVideoResponse struct {

	// If set, returns a [google.rpc.Status][google.rpc.Status] message that
	// specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// Streaming annotation results.
	AnnotationResults *StreamingVideoAnnotationResults `protobuf:"bytes,2,opt,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
	// Google Cloud Storage (GCS) URI that stores annotation results of one
	// streaming session in JSON format.
	// It is the annotation_result_storage_directory
	// from the request followed by '/cloud_project_number-session_id'.
	AnnotationResultsUri string `protobuf:"bytes,3,opt,name=annotation_results_uri,json=annotationResultsUri,proto3" json:"annotation_results_uri,omitempty"`
	// contains filtered or unexported fields
}

`StreamingAnnotateVideoResponse` is the only message returned to the client by `StreamingAnnotateVideo`. A series of zero or more `StreamingAnnotateVideoResponse` messages are streamed back to the client.

func (*StreamingAnnotateVideoResponse) Descriptor

func (*StreamingAnnotateVideoResponse) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAnnotateVideoResponse.ProtoReflect.Descriptor instead.

func (*StreamingAnnotateVideoResponse) GetAnnotationResults

func (*StreamingAnnotateVideoResponse) GetAnnotationResultsUri

func (x *StreamingAnnotateVideoResponse) GetAnnotationResultsUri() string

func (*StreamingAnnotateVideoResponse) GetError

func (*StreamingAnnotateVideoResponse) ProtoMessage

func (*StreamingAnnotateVideoResponse) ProtoMessage()

func (*StreamingAnnotateVideoResponse) ProtoReflect

func (*StreamingAnnotateVideoResponse) Reset

func (x *StreamingAnnotateVideoResponse) Reset()

func (*StreamingAnnotateVideoResponse) String

type StreamingAutomlActionRecognitionConfig

type StreamingAutomlActionRecognitionConfig struct {

	// Resource name of AutoML model.
	// Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
	ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,proto3" json:"model_name,omitempty"`
	// contains filtered or unexported fields
}

Config for STREAMING_AUTOML_ACTION_RECOGNITION.

func (*StreamingAutomlActionRecognitionConfig) Descriptor

func (*StreamingAutomlActionRecognitionConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAutomlActionRecognitionConfig.ProtoReflect.Descriptor instead.

func (*StreamingAutomlActionRecognitionConfig) GetModelName

func (*StreamingAutomlActionRecognitionConfig) ProtoMessage

func (*StreamingAutomlActionRecognitionConfig) ProtoReflect

func (*StreamingAutomlActionRecognitionConfig) Reset

func (*StreamingAutomlActionRecognitionConfig) String

type StreamingAutomlClassificationConfig

type StreamingAutomlClassificationConfig struct {

	// Resource name of AutoML model.
	// Format:
	// `projects/{project_number}/locations/{location_id}/models/{model_id}`
	ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,proto3" json:"model_name,omitempty"`
	// contains filtered or unexported fields
}

Config for STREAMING_AUTOML_CLASSIFICATION.

func (*StreamingAutomlClassificationConfig) Descriptor

func (*StreamingAutomlClassificationConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAutomlClassificationConfig.ProtoReflect.Descriptor instead.

func (*StreamingAutomlClassificationConfig) GetModelName

func (x *StreamingAutomlClassificationConfig) GetModelName() string

func (*StreamingAutomlClassificationConfig) ProtoMessage

func (*StreamingAutomlClassificationConfig) ProtoMessage()

func (*StreamingAutomlClassificationConfig) ProtoReflect

func (*StreamingAutomlClassificationConfig) Reset

func (*StreamingAutomlClassificationConfig) String

type StreamingAutomlObjectTrackingConfig

type StreamingAutomlObjectTrackingConfig struct {

	// Resource name of AutoML model.
	// Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
	ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,proto3" json:"model_name,omitempty"`
	// contains filtered or unexported fields
}

Config for STREAMING_AUTOML_OBJECT_TRACKING.

func (*StreamingAutomlObjectTrackingConfig) Descriptor

func (*StreamingAutomlObjectTrackingConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAutomlObjectTrackingConfig.ProtoReflect.Descriptor instead.

func (*StreamingAutomlObjectTrackingConfig) GetModelName

func (x *StreamingAutomlObjectTrackingConfig) GetModelName() string

func (*StreamingAutomlObjectTrackingConfig) ProtoMessage

func (*StreamingAutomlObjectTrackingConfig) ProtoMessage()

func (*StreamingAutomlObjectTrackingConfig) ProtoReflect

func (*StreamingAutomlObjectTrackingConfig) Reset

func (*StreamingAutomlObjectTrackingConfig) String

type StreamingExplicitContentDetectionConfig

type StreamingExplicitContentDetectionConfig struct {
	// contains filtered or unexported fields
}

Config for STREAMING_EXPLICIT_CONTENT_DETECTION.

func (*StreamingExplicitContentDetectionConfig) Descriptor

func (*StreamingExplicitContentDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.

func (*StreamingExplicitContentDetectionConfig) ProtoMessage

func (*StreamingExplicitContentDetectionConfig) ProtoReflect

func (*StreamingExplicitContentDetectionConfig) Reset

func (*StreamingExplicitContentDetectionConfig) String

type StreamingFeature

type StreamingFeature int32

Streaming video annotation feature.

const (
	// Unspecified.
	StreamingFeature_STREAMING_FEATURE_UNSPECIFIED StreamingFeature = 0
	// Label detection. Detect objects, such as dog or flower.
	StreamingFeature_STREAMING_LABEL_DETECTION StreamingFeature = 1
	// Shot change detection.
	StreamingFeature_STREAMING_SHOT_CHANGE_DETECTION StreamingFeature = 2
	// Explicit content detection.
	StreamingFeature_STREAMING_EXPLICIT_CONTENT_DETECTION StreamingFeature = 3
	// Object detection and tracking.
	StreamingFeature_STREAMING_OBJECT_TRACKING StreamingFeature = 4
	// Action recognition based on AutoML model.
	StreamingFeature_STREAMING_AUTOML_ACTION_RECOGNITION StreamingFeature = 23
	// Video classification based on AutoML model.
	StreamingFeature_STREAMING_AUTOML_CLASSIFICATION StreamingFeature = 21
	// Object detection and tracking based on AutoML model.
	StreamingFeature_STREAMING_AUTOML_OBJECT_TRACKING StreamingFeature = 22
)

func (StreamingFeature) Descriptor

func (StreamingFeature) Enum

func (StreamingFeature) EnumDescriptor

func (StreamingFeature) EnumDescriptor() ([]byte, []int)

Deprecated: Use StreamingFeature.Descriptor instead.

func (StreamingFeature) Number

func (StreamingFeature) String

func (x StreamingFeature) String() string

func (StreamingFeature) Type

type StreamingLabelDetectionConfig

type StreamingLabelDetectionConfig struct {

	// Whether the video has been captured from a stationary (i.e. non-moving)
	// camera. When set to true, it might improve detection accuracy for moving
	// objects. Default: false.
	StationaryCamera bool `protobuf:"varint,1,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
	// contains filtered or unexported fields
}

Config for STREAMING_LABEL_DETECTION.

func (*StreamingLabelDetectionConfig) Descriptor

func (*StreamingLabelDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingLabelDetectionConfig.ProtoReflect.Descriptor instead.

func (*StreamingLabelDetectionConfig) GetStationaryCamera

func (x *StreamingLabelDetectionConfig) GetStationaryCamera() bool

func (*StreamingLabelDetectionConfig) ProtoMessage

func (*StreamingLabelDetectionConfig) ProtoMessage()

func (*StreamingLabelDetectionConfig) ProtoReflect

func (*StreamingLabelDetectionConfig) Reset

func (x *StreamingLabelDetectionConfig) Reset()

func (*StreamingLabelDetectionConfig) String

type StreamingObjectTrackingConfig

type StreamingObjectTrackingConfig struct {
	// contains filtered or unexported fields
}

Config for STREAMING_OBJECT_TRACKING.

func (*StreamingObjectTrackingConfig) Descriptor

func (*StreamingObjectTrackingConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingObjectTrackingConfig.ProtoReflect.Descriptor instead.

func (*StreamingObjectTrackingConfig) ProtoMessage

func (*StreamingObjectTrackingConfig) ProtoMessage()

func (*StreamingObjectTrackingConfig) ProtoReflect

func (*StreamingObjectTrackingConfig) Reset

func (x *StreamingObjectTrackingConfig) Reset()

func (*StreamingObjectTrackingConfig) String

type StreamingShotChangeDetectionConfig

type StreamingShotChangeDetectionConfig struct {
	// contains filtered or unexported fields
}

Config for STREAMING_SHOT_CHANGE_DETECTION.

func (*StreamingShotChangeDetectionConfig) Descriptor

func (*StreamingShotChangeDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingShotChangeDetectionConfig.ProtoReflect.Descriptor instead.

func (*StreamingShotChangeDetectionConfig) ProtoMessage

func (*StreamingShotChangeDetectionConfig) ProtoMessage()

func (*StreamingShotChangeDetectionConfig) ProtoReflect

func (*StreamingShotChangeDetectionConfig) Reset

func (*StreamingShotChangeDetectionConfig) String

type StreamingStorageConfig

type StreamingStorageConfig struct {

	// Enable streaming storage. Default: false.
	EnableStorageAnnotationResult bool `` /* 153-byte string literal not displayed */
	// Cloud Storage URI to store all annotation results for one client. The client
	// should specify this field as the top-level storage directory. Annotation
	// results of different sessions will be put into different sub-directories
	// denoted by project_name and session_id. All sub-directories will be
	// auto-generated by the program and will be made accessible to the client in
	// the response proto. URIs must be specified in the following format:
	// `gs://bucket-id/object-id`. `bucket-id` should be a valid Cloud Storage
	// bucket created by the client, and bucket permissions must also be configured
	// properly. `object-id` can be an arbitrary string that makes sense to the
	// client. Other URI formats will return an error and cause a Cloud Storage
	// write failure.
	AnnotationResultStorageDirectory string `` /* 161-byte string literal not displayed */
	// contains filtered or unexported fields
}

Config for streaming storage option.

func (*StreamingStorageConfig) Descriptor

func (*StreamingStorageConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingStorageConfig.ProtoReflect.Descriptor instead.

func (*StreamingStorageConfig) GetAnnotationResultStorageDirectory

func (x *StreamingStorageConfig) GetAnnotationResultStorageDirectory() string

func (*StreamingStorageConfig) GetEnableStorageAnnotationResult

func (x *StreamingStorageConfig) GetEnableStorageAnnotationResult() bool

func (*StreamingStorageConfig) ProtoMessage

func (*StreamingStorageConfig) ProtoMessage()

func (*StreamingStorageConfig) ProtoReflect

func (x *StreamingStorageConfig) ProtoReflect() protoreflect.Message

func (*StreamingStorageConfig) Reset

func (x *StreamingStorageConfig) Reset()

func (*StreamingStorageConfig) String

func (x *StreamingStorageConfig) String() string

type StreamingVideoAnnotationResults

type StreamingVideoAnnotationResults struct {

	// Shot annotation results. Each shot is represented as a video segment.
	ShotAnnotations []*VideoSegment `protobuf:"bytes,1,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
	// Label annotation results.
	LabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=label_annotations,json=labelAnnotations,proto3" json:"label_annotations,omitempty"`
	// Explicit content annotation results.
	ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,3,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
	// Object tracking results.
	ObjectAnnotations []*ObjectTrackingAnnotation `protobuf:"bytes,4,rep,name=object_annotations,json=objectAnnotations,proto3" json:"object_annotations,omitempty"`
	// contains filtered or unexported fields
}

Streaming annotation results corresponding to a portion of the video that is currently being processed.

func (*StreamingVideoAnnotationResults) Descriptor

func (*StreamingVideoAnnotationResults) Descriptor() ([]byte, []int)

Deprecated: Use StreamingVideoAnnotationResults.ProtoReflect.Descriptor instead.

func (*StreamingVideoAnnotationResults) GetExplicitAnnotation

func (x *StreamingVideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation

func (*StreamingVideoAnnotationResults) GetLabelAnnotations

func (x *StreamingVideoAnnotationResults) GetLabelAnnotations() []*LabelAnnotation

func (*StreamingVideoAnnotationResults) GetObjectAnnotations

func (x *StreamingVideoAnnotationResults) GetObjectAnnotations() []*ObjectTrackingAnnotation

func (*StreamingVideoAnnotationResults) GetShotAnnotations

func (x *StreamingVideoAnnotationResults) GetShotAnnotations() []*VideoSegment

func (*StreamingVideoAnnotationResults) ProtoMessage

func (*StreamingVideoAnnotationResults) ProtoMessage()

func (*StreamingVideoAnnotationResults) ProtoReflect

func (*StreamingVideoAnnotationResults) Reset

func (*StreamingVideoAnnotationResults) String

type StreamingVideoConfig

type StreamingVideoConfig struct {

	// Config for requested annotation feature.
	//
	// Types that are assignable to StreamingConfig:
	//	*StreamingVideoConfig_ShotChangeDetectionConfig
	//	*StreamingVideoConfig_LabelDetectionConfig
	//	*StreamingVideoConfig_ExplicitContentDetectionConfig
	//	*StreamingVideoConfig_ObjectTrackingConfig
	//	*StreamingVideoConfig_AutomlActionRecognitionConfig
	//	*StreamingVideoConfig_AutomlClassificationConfig
	//	*StreamingVideoConfig_AutomlObjectTrackingConfig
	StreamingConfig isStreamingVideoConfig_StreamingConfig `protobuf_oneof:"streaming_config"`
	// Requested annotation feature.
	Feature StreamingFeature `` /* 131-byte string literal not displayed */
	// Streaming storage option. By default: storage is disabled.
	StorageConfig *StreamingStorageConfig `protobuf:"bytes,30,opt,name=storage_config,json=storageConfig,proto3" json:"storage_config,omitempty"`
	// contains filtered or unexported fields
}

Provides information to the annotator that specifies how to process the request.

func (*StreamingVideoConfig) Descriptor

func (*StreamingVideoConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingVideoConfig.ProtoReflect.Descriptor instead.

func (*StreamingVideoConfig) GetAutomlActionRecognitionConfig

func (x *StreamingVideoConfig) GetAutomlActionRecognitionConfig() *StreamingAutomlActionRecognitionConfig

func (*StreamingVideoConfig) GetAutomlClassificationConfig

func (x *StreamingVideoConfig) GetAutomlClassificationConfig() *StreamingAutomlClassificationConfig

func (*StreamingVideoConfig) GetAutomlObjectTrackingConfig

func (x *StreamingVideoConfig) GetAutomlObjectTrackingConfig() *StreamingAutomlObjectTrackingConfig

func (*StreamingVideoConfig) GetExplicitContentDetectionConfig

func (x *StreamingVideoConfig) GetExplicitContentDetectionConfig() *StreamingExplicitContentDetectionConfig

func (*StreamingVideoConfig) GetFeature

func (x *StreamingVideoConfig) GetFeature() StreamingFeature

func (*StreamingVideoConfig) GetLabelDetectionConfig

func (x *StreamingVideoConfig) GetLabelDetectionConfig() *StreamingLabelDetectionConfig

func (*StreamingVideoConfig) GetObjectTrackingConfig

func (x *StreamingVideoConfig) GetObjectTrackingConfig() *StreamingObjectTrackingConfig

func (*StreamingVideoConfig) GetShotChangeDetectionConfig

func (x *StreamingVideoConfig) GetShotChangeDetectionConfig() *StreamingShotChangeDetectionConfig

func (*StreamingVideoConfig) GetStorageConfig

func (x *StreamingVideoConfig) GetStorageConfig() *StreamingStorageConfig

func (*StreamingVideoConfig) GetStreamingConfig

func (m *StreamingVideoConfig) GetStreamingConfig() isStreamingVideoConfig_StreamingConfig

func (*StreamingVideoConfig) ProtoMessage

func (*StreamingVideoConfig) ProtoMessage()

func (*StreamingVideoConfig) ProtoReflect

func (x *StreamingVideoConfig) ProtoReflect() protoreflect.Message

func (*StreamingVideoConfig) Reset

func (x *StreamingVideoConfig) Reset()

func (*StreamingVideoConfig) String

func (x *StreamingVideoConfig) String() string

type StreamingVideoConfig_AutomlActionRecognitionConfig

type StreamingVideoConfig_AutomlActionRecognitionConfig struct {
	// Config for STREAMING_AUTOML_ACTION_RECOGNITION.
	AutomlActionRecognitionConfig *StreamingAutomlActionRecognitionConfig `protobuf:"bytes,23,opt,name=automl_action_recognition_config,json=automlActionRecognitionConfig,proto3,oneof"`
}

type StreamingVideoConfig_AutomlClassificationConfig

type StreamingVideoConfig_AutomlClassificationConfig struct {
	// Config for STREAMING_AUTOML_CLASSIFICATION.
	AutomlClassificationConfig *StreamingAutomlClassificationConfig `protobuf:"bytes,21,opt,name=automl_classification_config,json=automlClassificationConfig,proto3,oneof"`
}

type StreamingVideoConfig_AutomlObjectTrackingConfig

type StreamingVideoConfig_AutomlObjectTrackingConfig struct {
	// Config for STREAMING_AUTOML_OBJECT_TRACKING.
	AutomlObjectTrackingConfig *StreamingAutomlObjectTrackingConfig `protobuf:"bytes,22,opt,name=automl_object_tracking_config,json=automlObjectTrackingConfig,proto3,oneof"`
}

type StreamingVideoConfig_ExplicitContentDetectionConfig

type StreamingVideoConfig_ExplicitContentDetectionConfig struct {
	// Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
	ExplicitContentDetectionConfig *StreamingExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig,proto3,oneof"`
}

type StreamingVideoConfig_LabelDetectionConfig

type StreamingVideoConfig_LabelDetectionConfig struct {
	// Config for STREAMING_LABEL_DETECTION.
	LabelDetectionConfig *StreamingLabelDetectionConfig `protobuf:"bytes,3,opt,name=label_detection_config,json=labelDetectionConfig,proto3,oneof"`
}

type StreamingVideoConfig_ObjectTrackingConfig

type StreamingVideoConfig_ObjectTrackingConfig struct {
	// Config for STREAMING_OBJECT_TRACKING.
	ObjectTrackingConfig *StreamingObjectTrackingConfig `protobuf:"bytes,5,opt,name=object_tracking_config,json=objectTrackingConfig,proto3,oneof"`
}

type StreamingVideoConfig_ShotChangeDetectionConfig

type StreamingVideoConfig_ShotChangeDetectionConfig struct {
	// Config for STREAMING_SHOT_CHANGE_DETECTION.
	ShotChangeDetectionConfig *StreamingShotChangeDetectionConfig `protobuf:"bytes,2,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig,proto3,oneof"`
}

type StreamingVideoIntelligenceServiceClient

type StreamingVideoIntelligenceServiceClient interface {
	// Performs video annotation with bidirectional streaming: emitting results
	// while sending video/audio bytes.
	// This method is only available via the gRPC API (not REST).
	StreamingAnnotateVideo(ctx context.Context, opts ...grpc.CallOption) (StreamingVideoIntelligenceService_StreamingAnnotateVideoClient, error)
}

StreamingVideoIntelligenceServiceClient is the client API for StreamingVideoIntelligenceService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

type StreamingVideoIntelligenceServiceServer

type StreamingVideoIntelligenceServiceServer interface {
	// Performs video annotation with bidirectional streaming: emitting results
	// while sending video/audio bytes.
	// This method is only available via the gRPC API (not REST).
	StreamingAnnotateVideo(StreamingVideoIntelligenceService_StreamingAnnotateVideoServer) error
}

StreamingVideoIntelligenceServiceServer is the server API for StreamingVideoIntelligenceService service.

type StreamingVideoIntelligenceService_StreamingAnnotateVideoClient

type StreamingVideoIntelligenceService_StreamingAnnotateVideoClient interface {
	Send(*StreamingAnnotateVideoRequest) error
	Recv() (*StreamingAnnotateVideoResponse, error)
	grpc.ClientStream
}

type StreamingVideoIntelligenceService_StreamingAnnotateVideoServer

type StreamingVideoIntelligenceService_StreamingAnnotateVideoServer interface {
	Send(*StreamingAnnotateVideoResponse) error
	Recv() (*StreamingAnnotateVideoRequest, error)
	grpc.ServerStream
}

type TextAnnotation

type TextAnnotation struct {

	// The detected text.
	Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	// All video segments where OCR detected text appears.
	Segments []*TextSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
	// contains filtered or unexported fields
}

Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection.

func (*TextAnnotation) Descriptor

func (*TextAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use TextAnnotation.ProtoReflect.Descriptor instead.

func (*TextAnnotation) GetSegments

func (x *TextAnnotation) GetSegments() []*TextSegment

func (*TextAnnotation) GetText

func (x *TextAnnotation) GetText() string

func (*TextAnnotation) ProtoMessage

func (*TextAnnotation) ProtoMessage()

func (*TextAnnotation) ProtoReflect

func (x *TextAnnotation) ProtoReflect() protoreflect.Message

func (*TextAnnotation) Reset

func (x *TextAnnotation) Reset()

func (*TextAnnotation) String

func (x *TextAnnotation) String() string

type TextDetectionConfig

type TextDetectionConfig struct {

	// A language hint can be specified if the language to be detected is known a
	// priori. It can increase the accuracy of the detection. The language hint
	// must be a language code in BCP-47 format.
	//
	// Automatic language detection is performed if no hint is provided.
	LanguageHints []string `protobuf:"bytes,1,rep,name=language_hints,json=languageHints,proto3" json:"language_hints,omitempty"`
	// Model to use for text detection.
	// Supported values: "builtin/stable" (the default if unset) and
	// "builtin/latest".
	Model string `protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
	// contains filtered or unexported fields
}

Config for TEXT_DETECTION.

func (*TextDetectionConfig) Descriptor

func (*TextDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use TextDetectionConfig.ProtoReflect.Descriptor instead.

func (*TextDetectionConfig) GetLanguageHints

func (x *TextDetectionConfig) GetLanguageHints() []string

func (*TextDetectionConfig) GetModel

func (x *TextDetectionConfig) GetModel() string

func (*TextDetectionConfig) ProtoMessage

func (*TextDetectionConfig) ProtoMessage()

func (*TextDetectionConfig) ProtoReflect

func (x *TextDetectionConfig) ProtoReflect() protoreflect.Message

func (*TextDetectionConfig) Reset

func (x *TextDetectionConfig) Reset()

func (*TextDetectionConfig) String

func (x *TextDetectionConfig) String() string

type TextFrame

type TextFrame struct {

	// Bounding polygon of the detected text for this frame.
	RotatedBoundingBox *NormalizedBoundingPoly `protobuf:"bytes,1,opt,name=rotated_bounding_box,json=rotatedBoundingBox,proto3" json:"rotated_bounding_box,omitempty"`
	// Timestamp of this frame.
	TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
	// contains filtered or unexported fields
}

Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets.

func (*TextFrame) Descriptor

func (*TextFrame) Descriptor() ([]byte, []int)

Deprecated: Use TextFrame.ProtoReflect.Descriptor instead.

func (*TextFrame) GetRotatedBoundingBox

func (x *TextFrame) GetRotatedBoundingBox() *NormalizedBoundingPoly

func (*TextFrame) GetTimeOffset

func (x *TextFrame) GetTimeOffset() *durationpb.Duration

func (*TextFrame) ProtoMessage

func (*TextFrame) ProtoMessage()

func (*TextFrame) ProtoReflect

func (x *TextFrame) ProtoReflect() protoreflect.Message

func (*TextFrame) Reset

func (x *TextFrame) Reset()

func (*TextFrame) String

func (x *TextFrame) String() string

type TextSegment

type TextSegment struct {

	// Video segment where a text snippet was detected.
	Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
	// Confidence for the track of detected text. It is calculated as the highest
	// over all frames where OCR detected text appears.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Information related to the frames where OCR detected text appears.
	Frames []*TextFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
	// contains filtered or unexported fields
}

Video segment level annotation results for text detection.

func (*TextSegment) Descriptor

func (*TextSegment) Descriptor() ([]byte, []int)

Deprecated: Use TextSegment.ProtoReflect.Descriptor instead.

func (*TextSegment) GetConfidence

func (x *TextSegment) GetConfidence() float32

func (*TextSegment) GetFrames

func (x *TextSegment) GetFrames() []*TextFrame

func (*TextSegment) GetSegment

func (x *TextSegment) GetSegment() *VideoSegment

func (*TextSegment) ProtoMessage

func (*TextSegment) ProtoMessage()

func (*TextSegment) ProtoReflect

func (x *TextSegment) ProtoReflect() protoreflect.Message

func (*TextSegment) Reset

func (x *TextSegment) Reset()

func (*TextSegment) String

func (x *TextSegment) String() string

type TimestampedObject

type TimestampedObject struct {

	// Normalized Bounding box in a frame, where the object is located.
	NormalizedBoundingBox *NormalizedBoundingBox `` /* 126-byte string literal not displayed */
	// Time-offset, relative to the beginning of the video,
	// corresponding to the video frame for this object.
	TimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
	// Optional. The attributes of the object in the bounding box.
	Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// Optional. The detected landmarks.
	Landmarks []*DetectedLandmark `protobuf:"bytes,4,rep,name=landmarks,proto3" json:"landmarks,omitempty"`
	// contains filtered or unexported fields
}

For tracking related features. An object at time_offset with attributes, and located with normalized_bounding_box.

func (*TimestampedObject) Descriptor

func (*TimestampedObject) Descriptor() ([]byte, []int)

Deprecated: Use TimestampedObject.ProtoReflect.Descriptor instead.

func (*TimestampedObject) GetAttributes

func (x *TimestampedObject) GetAttributes() []*DetectedAttribute

func (*TimestampedObject) GetLandmarks

func (x *TimestampedObject) GetLandmarks() []*DetectedLandmark

func (*TimestampedObject) GetNormalizedBoundingBox

func (x *TimestampedObject) GetNormalizedBoundingBox() *NormalizedBoundingBox

func (*TimestampedObject) GetTimeOffset

func (x *TimestampedObject) GetTimeOffset() *durationpb.Duration

func (*TimestampedObject) ProtoMessage

func (*TimestampedObject) ProtoMessage()

func (*TimestampedObject) ProtoReflect

func (x *TimestampedObject) ProtoReflect() protoreflect.Message

func (*TimestampedObject) Reset

func (x *TimestampedObject) Reset()

func (*TimestampedObject) String

func (x *TimestampedObject) String() string

type Track

type Track struct {

	// Video segment of a track.
	Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
	// The object with timestamp and attributes per frame in the track.
	TimestampedObjects []*TimestampedObject `protobuf:"bytes,2,rep,name=timestamped_objects,json=timestampedObjects,proto3" json:"timestamped_objects,omitempty"`
	// Optional. Attributes in the track level.
	Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// Optional. The confidence score of the tracked object.
	Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// contains filtered or unexported fields
}

A track of an object instance.

func (*Track) Descriptor

func (*Track) Descriptor() ([]byte, []int)

Deprecated: Use Track.ProtoReflect.Descriptor instead.

func (*Track) GetAttributes

func (x *Track) GetAttributes() []*DetectedAttribute

func (*Track) GetConfidence

func (x *Track) GetConfidence() float32

func (*Track) GetSegment

func (x *Track) GetSegment() *VideoSegment

func (*Track) GetTimestampedObjects

func (x *Track) GetTimestampedObjects() []*TimestampedObject

func (*Track) ProtoMessage

func (*Track) ProtoMessage()

func (*Track) ProtoReflect

func (x *Track) ProtoReflect() protoreflect.Message

func (*Track) Reset

func (x *Track) Reset()

func (*Track) String

func (x *Track) String() string

type UnimplementedStreamingVideoIntelligenceServiceServer

type UnimplementedStreamingVideoIntelligenceServiceServer struct {
}

UnimplementedStreamingVideoIntelligenceServiceServer can be embedded to have forward compatible implementations.

func (*UnimplementedStreamingVideoIntelligenceServiceServer) StreamingAnnotateVideo

type UnimplementedVideoIntelligenceServiceServer

type UnimplementedVideoIntelligenceServiceServer struct {
}

UnimplementedVideoIntelligenceServiceServer can be embedded to have forward compatible implementations.

func (*UnimplementedVideoIntelligenceServiceServer) AnnotateVideo

type VideoAnnotationProgress

type VideoAnnotationProgress struct {

	// Video file location in
	// [Cloud Storage](https://cloud.google.com/storage/).
	InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
	// Approximate percentage processed thus far. Guaranteed to be
	// 100 when fully processed.
	ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
	// Time when the request was received.
	StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time of the most recent update.
	UpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
	// Specifies which feature is being tracked if the request contains more than
	// one feature.
	Feature Feature `protobuf:"varint,5,opt,name=feature,proto3,enum=google.cloud.videointelligence.v1p3beta1.Feature" json:"feature,omitempty"`
	// Specifies which segment is being tracked if the request contains more than
	// one segment.
	Segment *VideoSegment `protobuf:"bytes,6,opt,name=segment,proto3" json:"segment,omitempty"`
	// contains filtered or unexported fields
}

Annotation progress for a single video.

func (*VideoAnnotationProgress) Descriptor

func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)

Deprecated: Use VideoAnnotationProgress.ProtoReflect.Descriptor instead.

func (*VideoAnnotationProgress) GetFeature

func (x *VideoAnnotationProgress) GetFeature() Feature

func (*VideoAnnotationProgress) GetInputUri

func (x *VideoAnnotationProgress) GetInputUri() string

func (*VideoAnnotationProgress) GetProgressPercent

func (x *VideoAnnotationProgress) GetProgressPercent() int32

func (*VideoAnnotationProgress) GetSegment

func (x *VideoAnnotationProgress) GetSegment() *VideoSegment

func (*VideoAnnotationProgress) GetStartTime

func (x *VideoAnnotationProgress) GetStartTime() *timestamppb.Timestamp

func (*VideoAnnotationProgress) GetUpdateTime

func (x *VideoAnnotationProgress) GetUpdateTime() *timestamppb.Timestamp

func (*VideoAnnotationProgress) ProtoMessage

func (*VideoAnnotationProgress) ProtoMessage()

func (*VideoAnnotationProgress) ProtoReflect

func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Message

func (*VideoAnnotationProgress) Reset

func (x *VideoAnnotationProgress) Reset()

func (*VideoAnnotationProgress) String

func (x *VideoAnnotationProgress) String() string

type VideoAnnotationResults

type VideoAnnotationResults struct {

	// Video file location in
	// [Cloud Storage](https://cloud.google.com/storage/).
	InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
	// Video segment on which the annotation is run.
	Segment *VideoSegment `protobuf:"bytes,10,opt,name=segment,proto3" json:"segment,omitempty"`
	// Topical label annotations on video level or user-specified segment level.
	// There is exactly one element for each unique label.
	SegmentLabelAnnotations []*LabelAnnotation `` /* 132-byte string literal not displayed */
	// Presence label annotations on video level or user-specified segment level.
	// There is exactly one element for each unique label. Compared to the
	// existing topical `segment_label_annotations`, this field presents more
	// fine-grained, segment-level labels detected in video content and is made
	// available only when the client sets `LabelDetectionConfig.model` to
	// "builtin/latest" in the request.
	SegmentPresenceLabelAnnotations []*LabelAnnotation `` /* 159-byte string literal not displayed */
	// Topical label annotations on shot level.
	// There is exactly one element for each unique label.
	ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations,proto3" json:"shot_label_annotations,omitempty"`
	// Presence label annotations on shot level. There is exactly one element for
	// each unique label. Compared to the existing topical
	// `shot_label_annotations`, this field presents more fine-grained, shot-level
	// labels detected in video content and is made available only when the client
	// sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
	ShotPresenceLabelAnnotations []*LabelAnnotation `` /* 150-byte string literal not displayed */
	// Label annotations on frame level.
	// There is exactly one element for each unique label.
	FrameLabelAnnotations []*LabelAnnotation `` /* 126-byte string literal not displayed */
	// Face detection annotations.
	FaceDetectionAnnotations []*FaceDetectionAnnotation `` /* 136-byte string literal not displayed */
	// Shot annotations. Each shot is represented as a video segment.
	ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
	// Explicit content annotation.
	ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
	// Speech transcription.
	SpeechTranscriptions []*SpeechTranscription `protobuf:"bytes,11,rep,name=speech_transcriptions,json=speechTranscriptions,proto3" json:"speech_transcriptions,omitempty"`
	// OCR text detection and tracking.
	// Annotations for list of detected text snippets. Each will have list of
	// frame information associated with it.
	TextAnnotations []*TextAnnotation `protobuf:"bytes,12,rep,name=text_annotations,json=textAnnotations,proto3" json:"text_annotations,omitempty"`
	// Annotations for list of objects detected and tracked in video.
	ObjectAnnotations []*ObjectTrackingAnnotation `protobuf:"bytes,14,rep,name=object_annotations,json=objectAnnotations,proto3" json:"object_annotations,omitempty"`
	// Annotations for list of logos detected, tracked and recognized in video.
	LogoRecognitionAnnotations []*LogoRecognitionAnnotation `` /* 142-byte string literal not displayed */
	// Person detection annotations.
	PersonDetectionAnnotations []*PersonDetectionAnnotation `` /* 142-byte string literal not displayed */
	// Celebrity recognition annotations.
	CelebrityRecognitionAnnotations *CelebrityRecognitionAnnotation `` /* 157-byte string literal not displayed */
	// If set, indicates an error. Note that for a single `AnnotateVideoRequest`
	// some videos may succeed and some may fail.
	Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
	// contains filtered or unexported fields
}

Annotation results for a single video.

func (*VideoAnnotationResults) Descriptor

func (*VideoAnnotationResults) Descriptor() ([]byte, []int)

Deprecated: Use VideoAnnotationResults.ProtoReflect.Descriptor instead.

func (*VideoAnnotationResults) GetCelebrityRecognitionAnnotations

func (x *VideoAnnotationResults) GetCelebrityRecognitionAnnotations() *CelebrityRecognitionAnnotation

func (*VideoAnnotationResults) GetError

func (x *VideoAnnotationResults) GetError() *status.Status

func (*VideoAnnotationResults) GetExplicitAnnotation

func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation

func (*VideoAnnotationResults) GetFaceDetectionAnnotations

func (x *VideoAnnotationResults) GetFaceDetectionAnnotations() []*FaceDetectionAnnotation

func (*VideoAnnotationResults) GetFrameLabelAnnotations

func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetInputUri

func (x *VideoAnnotationResults) GetInputUri() string

func (*VideoAnnotationResults) GetLogoRecognitionAnnotations

func (x *VideoAnnotationResults) GetLogoRecognitionAnnotations() []*LogoRecognitionAnnotation

func (*VideoAnnotationResults) GetObjectAnnotations

func (x *VideoAnnotationResults) GetObjectAnnotations() []*ObjectTrackingAnnotation

func (*VideoAnnotationResults) GetPersonDetectionAnnotations

func (x *VideoAnnotationResults) GetPersonDetectionAnnotations() []*PersonDetectionAnnotation

func (*VideoAnnotationResults) GetSegment

func (x *VideoAnnotationResults) GetSegment() *VideoSegment

func (*VideoAnnotationResults) GetSegmentLabelAnnotations

func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetSegmentPresenceLabelAnnotations

func (x *VideoAnnotationResults) GetSegmentPresenceLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetShotAnnotations

func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment

func (*VideoAnnotationResults) GetShotLabelAnnotations

func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetShotPresenceLabelAnnotations

func (x *VideoAnnotationResults) GetShotPresenceLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetSpeechTranscriptions

func (x *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription

func (*VideoAnnotationResults) GetTextAnnotations

func (x *VideoAnnotationResults) GetTextAnnotations() []*TextAnnotation

func (*VideoAnnotationResults) ProtoMessage

func (*VideoAnnotationResults) ProtoMessage()

func (*VideoAnnotationResults) ProtoReflect

func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message

func (*VideoAnnotationResults) Reset

func (x *VideoAnnotationResults) Reset()

func (*VideoAnnotationResults) String

func (x *VideoAnnotationResults) String() string

type VideoContext

type VideoContext struct {

	// Video segments to annotate. The segments may overlap and are not required
	// to be contiguous or span the whole video. If unspecified, each video is
	// treated as a single segment.
	Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
	// Config for LABEL_DETECTION.
	LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig,proto3" json:"label_detection_config,omitempty"`
	// Config for SHOT_CHANGE_DETECTION.
	ShotChangeDetectionConfig *ShotChangeDetectionConfig `` /* 140-byte string literal not displayed */
	// Config for EXPLICIT_CONTENT_DETECTION.
	ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `` /* 155-byte string literal not displayed */
	// Config for FACE_DETECTION.
	FaceDetectionConfig *FaceDetectionConfig `protobuf:"bytes,5,opt,name=face_detection_config,json=faceDetectionConfig,proto3" json:"face_detection_config,omitempty"`
	// Config for SPEECH_TRANSCRIPTION.
	SpeechTranscriptionConfig *SpeechTranscriptionConfig `` /* 138-byte string literal not displayed */
	// Config for TEXT_DETECTION.
	TextDetectionConfig *TextDetectionConfig `protobuf:"bytes,8,opt,name=text_detection_config,json=textDetectionConfig,proto3" json:"text_detection_config,omitempty"`
	// Config for PERSON_DETECTION.
	PersonDetectionConfig *PersonDetectionConfig `` /* 127-byte string literal not displayed */
	// Config for OBJECT_TRACKING.
	ObjectTrackingConfig *ObjectTrackingConfig `protobuf:"bytes,13,opt,name=object_tracking_config,json=objectTrackingConfig,proto3" json:"object_tracking_config,omitempty"`
	// contains filtered or unexported fields
}

Video context and/or feature-specific parameters.

func (*VideoContext) Descriptor

func (*VideoContext) Descriptor() ([]byte, []int)

Deprecated: Use VideoContext.ProtoReflect.Descriptor instead.

func (*VideoContext) GetExplicitContentDetectionConfig

func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig

func (*VideoContext) GetFaceDetectionConfig

func (x *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig

func (*VideoContext) GetLabelDetectionConfig

func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig

func (*VideoContext) GetObjectTrackingConfig

func (x *VideoContext) GetObjectTrackingConfig() *ObjectTrackingConfig

func (*VideoContext) GetPersonDetectionConfig

func (x *VideoContext) GetPersonDetectionConfig() *PersonDetectionConfig

func (*VideoContext) GetSegments

func (x *VideoContext) GetSegments() []*VideoSegment

func (*VideoContext) GetShotChangeDetectionConfig

func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig

func (*VideoContext) GetSpeechTranscriptionConfig

func (x *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig

func (*VideoContext) GetTextDetectionConfig

func (x *VideoContext) GetTextDetectionConfig() *TextDetectionConfig

func (*VideoContext) ProtoMessage

func (*VideoContext) ProtoMessage()

func (*VideoContext) ProtoReflect

func (x *VideoContext) ProtoReflect() protoreflect.Message

func (*VideoContext) Reset

func (x *VideoContext) Reset()

func (*VideoContext) String

func (x *VideoContext) String() string

type VideoIntelligenceServiceClient

type VideoIntelligenceServiceClient interface {
	// Performs asynchronous video annotation. Progress and results can be
	// retrieved through the `google.longrunning.Operations` interface.
	// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
	// `Operation.response` contains `AnnotateVideoResponse` (results).
	AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}

VideoIntelligenceServiceClient is the client API for the VideoIntelligenceService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

type VideoIntelligenceServiceServer

type VideoIntelligenceServiceServer interface {
	// Performs asynchronous video annotation. Progress and results can be
	// retrieved through the `google.longrunning.Operations` interface.
	// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
	// `Operation.response` contains `AnnotateVideoResponse` (results).
	AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)
}

VideoIntelligenceServiceServer is the server API for the VideoIntelligenceService service.

type VideoSegment

type VideoSegment struct {

	// Time-offset, relative to the beginning of the video,
	// corresponding to the start of the segment (inclusive).
	StartTimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset,proto3" json:"start_time_offset,omitempty"`
	// Time-offset, relative to the beginning of the video,
	// corresponding to the end of the segment (inclusive).
	EndTimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset,proto3" json:"end_time_offset,omitempty"`
	// contains filtered or unexported fields
}

Video segment.

func (*VideoSegment) Descriptor

func (*VideoSegment) Descriptor() ([]byte, []int)

Deprecated: Use VideoSegment.ProtoReflect.Descriptor instead.

func (*VideoSegment) GetEndTimeOffset

func (x *VideoSegment) GetEndTimeOffset() *durationpb.Duration

func (*VideoSegment) GetStartTimeOffset

func (x *VideoSegment) GetStartTimeOffset() *durationpb.Duration

func (*VideoSegment) ProtoMessage

func (*VideoSegment) ProtoMessage()

func (*VideoSegment) ProtoReflect

func (x *VideoSegment) ProtoReflect() protoreflect.Message

func (*VideoSegment) Reset

func (x *VideoSegment) Reset()

func (*VideoSegment) String

func (x *VideoSegment) String() string

type WordInfo

type WordInfo struct {

	// Time offset relative to the beginning of the audio, and
	// corresponding to the start of the spoken word. This field is only set if
	// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
	// experimental feature and the accuracy of the time offset can vary.
	StartTime *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time offset relative to the beginning of the audio, and
	// corresponding to the end of the spoken word. This field is only set if
	// `enable_word_time_offsets=true` and only in the top hypothesis. This is an
	// experimental feature and the accuracy of the time offset can vary.
	EndTime *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// The word corresponding to this set of information.
	Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
	// Output only. The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A distinct integer value is assigned for every speaker within
	// the audio. This field specifies which one of those speakers was detected to
	// have spoken this word. Value ranges from 1 up to diarization_speaker_count,
	// and is only set if speaker diarization is enabled.
	SpeakerTag int32 `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
	// contains filtered or unexported fields
}

Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as `enable_word_time_offsets`.

func (*WordInfo) Descriptor

func (*WordInfo) Descriptor() ([]byte, []int)

Deprecated: Use WordInfo.ProtoReflect.Descriptor instead.

func (*WordInfo) GetConfidence

func (x *WordInfo) GetConfidence() float32

func (*WordInfo) GetEndTime

func (x *WordInfo) GetEndTime() *durationpb.Duration

func (*WordInfo) GetSpeakerTag

func (x *WordInfo) GetSpeakerTag() int32

func (*WordInfo) GetStartTime

func (x *WordInfo) GetStartTime() *durationpb.Duration

func (*WordInfo) GetWord

func (x *WordInfo) GetWord() string

func (*WordInfo) ProtoMessage

func (*WordInfo) ProtoMessage()

func (*WordInfo) ProtoReflect

func (x *WordInfo) ProtoReflect() protoreflect.Message

func (*WordInfo) Reset

func (x *WordInfo) Reset()

func (*WordInfo) String

func (x *WordInfo) String() string