Documentation

Index

Constants

This section is empty.

Variables

var (
	ClassificationType_name = map[int32]string{
		0: "CLASSIFICATION_TYPE_UNSPECIFIED",
		1: "MULTICLASS",
		2: "MULTILABEL",
	}
	ClassificationType_value = map[string]int32{
		"CLASSIFICATION_TYPE_UNSPECIFIED": 0,
		"MULTICLASS":                      1,
		"MULTILABEL":                      2,
	}
)

Enum value maps for ClassificationType.
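
These generated maps can be used directly to convert between the numeric and string forms of the enum. A minimal sketch (the genproto import path is an assumption; within this package the maps are referenced unqualified):

package main

import (
	"fmt"

	automlpb "google.golang.org/genproto/googleapis/cloud/automl/v1beta1" // assumed import path
)

func main() {
	// Map a numeric enum value to its proto name, and a name back to its number.
	fmt.Println(automlpb.ClassificationType_name[1])             // MULTICLASS
	fmt.Println(automlpb.ClassificationType_value["MULTILABEL"]) // 2
}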

var (
	DocumentDimensions_DocumentDimensionUnit_name = map[int32]string{
		0: "DOCUMENT_DIMENSION_UNIT_UNSPECIFIED",
		1: "INCH",
		2: "CENTIMETER",
		3: "POINT",
	}
	DocumentDimensions_DocumentDimensionUnit_value = map[string]int32{
		"DOCUMENT_DIMENSION_UNIT_UNSPECIFIED": 0,
		"INCH":                                1,
		"CENTIMETER":                          2,
		"POINT":                               3,
	}
)

Enum value maps for DocumentDimensions_DocumentDimensionUnit.

var (
	Document_Layout_TextSegmentType_name = map[int32]string{
		0: "TEXT_SEGMENT_TYPE_UNSPECIFIED",
		1: "TOKEN",
		2: "PARAGRAPH",
		3: "FORM_FIELD",
		4: "FORM_FIELD_NAME",
		5: "FORM_FIELD_CONTENTS",
		6: "TABLE",
		7: "TABLE_HEADER",
		8: "TABLE_ROW",
		9: "TABLE_CELL",
	}
	Document_Layout_TextSegmentType_value = map[string]int32{
		"TEXT_SEGMENT_TYPE_UNSPECIFIED": 0,
		"TOKEN":                         1,
		"PARAGRAPH":                     2,
		"FORM_FIELD":                    3,
		"FORM_FIELD_NAME":               4,
		"FORM_FIELD_CONTENTS":           5,
		"TABLE":                         6,
		"TABLE_HEADER":                  7,
		"TABLE_ROW":                     8,
		"TABLE_CELL":                    9,
	}
)

Enum value maps for Document_Layout_TextSegmentType.

var (
	TypeCode_name = map[int32]string{
		0:  "TYPE_CODE_UNSPECIFIED",
		3:  "FLOAT64",
		4:  "TIMESTAMP",
		6:  "STRING",
		8:  "ARRAY",
		9:  "STRUCT",
		10: "CATEGORY",
	}
	TypeCode_value = map[string]int32{
		"TYPE_CODE_UNSPECIFIED": 0,
		"FLOAT64":               3,
		"TIMESTAMP":             4,
		"STRING":                6,
		"ARRAY":                 8,
		"STRUCT":                9,
		"CATEGORY":              10,
	}
)

Enum value maps for TypeCode.

var (
	Model_DeploymentState_name = map[int32]string{
		0: "DEPLOYMENT_STATE_UNSPECIFIED",
		1: "DEPLOYED",
		2: "UNDEPLOYED",
	}
	Model_DeploymentState_value = map[string]int32{
		"DEPLOYMENT_STATE_UNSPECIFIED": 0,
		"DEPLOYED":                     1,
		"UNDEPLOYED":                   2,
	}
)

Enum value maps for Model_DeploymentState.

var File_google_cloud_automl_v1beta1_annotation_payload_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_annotation_spec_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_classification_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_column_spec_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_data_items_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_data_stats_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_data_types_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_dataset_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_detection_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_geometry_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_image_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_io_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_model_evaluation_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_model_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_operations_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_prediction_service_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_ranges_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_regression_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_service_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_table_spec_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_tables_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_temporal_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_text_extraction_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_text_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_text_segment_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_text_sentiment_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_translation_proto protoreflect.FileDescriptor
var File_google_cloud_automl_v1beta1_video_proto protoreflect.FileDescriptor

Functions

func RegisterAutoMlServer

func RegisterAutoMlServer(s *grpc.Server, srv AutoMlServer)

func RegisterPredictionServiceServer

func RegisterPredictionServiceServer(s *grpc.Server, srv PredictionServiceServer)
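
A minimal sketch of wiring both registration functions onto a gRPC server (the import path and listener handling are assumptions; concrete implementations of the server interfaces must be supplied by the caller):

package example

import (
	"net"

	"google.golang.org/grpc"

	automlpb "google.golang.org/genproto/googleapis/cloud/automl/v1beta1" // assumed import path
)

// serve registers the given service implementations on a fresh gRPC server
// and blocks serving requests on lis.
func serve(lis net.Listener, automlSrv automlpb.AutoMlServer, predictionSrv automlpb.PredictionServiceServer) error {
	s := grpc.NewServer()
	automlpb.RegisterAutoMlServer(s, automlSrv)
	automlpb.RegisterPredictionServiceServer(s, predictionSrv)
	return s.Serve(lis)
}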

Types

type AnnotationPayload

type AnnotationPayload struct {

	// Output only. Additional information about the annotation
	// specific to the AutoML domain.
	//
	// Types that are assignable to Detail:
	//	*AnnotationPayload_Translation
	//	*AnnotationPayload_Classification
	//	*AnnotationPayload_ImageObjectDetection
	//	*AnnotationPayload_VideoClassification
	//	*AnnotationPayload_VideoObjectTracking
	//	*AnnotationPayload_TextExtraction
	//	*AnnotationPayload_TextSentiment
	//	*AnnotationPayload_Tables
	Detail isAnnotationPayload_Detail `protobuf_oneof:"detail"`
	// Output only. The resource ID of the annotation spec that
	// this annotation pertains to. The annotation spec comes from either an
	// ancestor dataset, or the dataset that was used to train the model in use.
	AnnotationSpecId string `protobuf:"bytes,1,opt,name=annotation_spec_id,json=annotationSpecId,proto3" json:"annotation_spec_id,omitempty"`
	// Output only. The value of
	// [display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name]
	// when the model was trained. Because this field returns a value at model
	// training time, the returned value could differ for different models trained
	// using the same dataset, as the model owner could update the `display_name`
	// between any two model trainings.
	DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
	// contains filtered or unexported fields
}

Contains annotation information that is relevant to AutoML.
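
A minimal sketch of inspecting the Detail oneof on a received payload, using the generated wrapper types listed above (the import path is an assumption; only a few of the possible detail kinds are shown):

package example

import (
	"fmt"

	automlpb "google.golang.org/genproto/googleapis/cloud/automl/v1beta1" // assumed import path
)

// describeDetail reports which kind of annotation detail a payload carries.
func describeDetail(p *automlpb.AnnotationPayload) {
	switch p.GetDetail().(type) {
	case *automlpb.AnnotationPayload_Classification:
		fmt.Println("classification annotation for spec", p.GetAnnotationSpecId())
	case *automlpb.AnnotationPayload_ImageObjectDetection:
		fmt.Println("image object detection annotation for spec", p.GetAnnotationSpecId())
	case *automlpb.AnnotationPayload_TextExtraction:
		fmt.Println("text extraction annotation for spec", p.GetAnnotationSpecId())
	default:
		fmt.Println("other or unset annotation detail")
	}
}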

func (*AnnotationPayload) Descriptor

func (*AnnotationPayload) Descriptor() ([]byte, []int)

Deprecated: Use AnnotationPayload.ProtoReflect.Descriptor instead.

func (*AnnotationPayload) GetAnnotationSpecId

func (x *AnnotationPayload) GetAnnotationSpecId() string

func (*AnnotationPayload) GetClassification

func (x *AnnotationPayload) GetClassification() *ClassificationAnnotation

func (*AnnotationPayload) GetDetail

func (m *AnnotationPayload) GetDetail() isAnnotationPayload_Detail

func (*AnnotationPayload) GetDisplayName

func (x *AnnotationPayload) GetDisplayName() string

func (*AnnotationPayload) GetImageObjectDetection

func (x *AnnotationPayload) GetImageObjectDetection() *ImageObjectDetectionAnnotation

func (*AnnotationPayload) GetTables

func (x *AnnotationPayload) GetTables() *TablesAnnotation

func (*AnnotationPayload) GetTextExtraction

func (x *AnnotationPayload) GetTextExtraction() *TextExtractionAnnotation

func (*AnnotationPayload) GetTextSentiment

func (x *AnnotationPayload) GetTextSentiment() *TextSentimentAnnotation

func (*AnnotationPayload) GetTranslation

func (x *AnnotationPayload) GetTranslation() *TranslationAnnotation

func (*AnnotationPayload) GetVideoClassification

func (x *AnnotationPayload) GetVideoClassification() *VideoClassificationAnnotation

func (*AnnotationPayload) GetVideoObjectTracking

func (x *AnnotationPayload) GetVideoObjectTracking() *VideoObjectTrackingAnnotation

func (*AnnotationPayload) ProtoMessage

func (*AnnotationPayload) ProtoMessage()

func (*AnnotationPayload) ProtoReflect

func (x *AnnotationPayload) ProtoReflect() protoreflect.Message

func (*AnnotationPayload) Reset

func (x *AnnotationPayload) Reset()

func (*AnnotationPayload) String

func (x *AnnotationPayload) String() string

type AnnotationPayload_Classification

type AnnotationPayload_Classification struct {
	// Annotation details for content or image classification.
	Classification *ClassificationAnnotation `protobuf:"bytes,3,opt,name=classification,proto3,oneof"`
}

type AnnotationPayload_ImageObjectDetection

type AnnotationPayload_ImageObjectDetection struct {
	// Annotation details for image object detection.
	ImageObjectDetection *ImageObjectDetectionAnnotation `protobuf:"bytes,4,opt,name=image_object_detection,json=imageObjectDetection,proto3,oneof"`
}

type AnnotationPayload_Tables

type AnnotationPayload_Tables struct {
	// Annotation details for Tables.
	Tables *TablesAnnotation `protobuf:"bytes,10,opt,name=tables,proto3,oneof"`
}

type AnnotationPayload_TextExtraction

type AnnotationPayload_TextExtraction struct {
	// Annotation details for text extraction.
	TextExtraction *TextExtractionAnnotation `protobuf:"bytes,6,opt,name=text_extraction,json=textExtraction,proto3,oneof"`
}

type AnnotationPayload_TextSentiment

type AnnotationPayload_TextSentiment struct {
	// Annotation details for text sentiment.
	TextSentiment *TextSentimentAnnotation `protobuf:"bytes,7,opt,name=text_sentiment,json=textSentiment,proto3,oneof"`
}

type AnnotationPayload_Translation

type AnnotationPayload_Translation struct {
	// Annotation details for translation.
	Translation *TranslationAnnotation `protobuf:"bytes,2,opt,name=translation,proto3,oneof"`
}

type AnnotationPayload_VideoClassification

type AnnotationPayload_VideoClassification struct {
	// Annotation details for video classification.
	// Returned for Video Classification predictions.
	VideoClassification *VideoClassificationAnnotation `protobuf:"bytes,9,opt,name=video_classification,json=videoClassification,proto3,oneof"`
}

type AnnotationPayload_VideoObjectTracking

type AnnotationPayload_VideoObjectTracking struct {
	// Annotation details for video object tracking.
	VideoObjectTracking *VideoObjectTrackingAnnotation `protobuf:"bytes,8,opt,name=video_object_tracking,json=videoObjectTracking,proto3,oneof"`
}

type AnnotationSpec

type AnnotationSpec struct {

	// Output only. Resource name of the annotation spec.
	// Form:
	//
	// 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}'
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. The name of the annotation spec to show in the interface. The name can be
	// up to 32 characters long and must match the regexp `[a-zA-Z0-9_]+`.
	DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
	// Output only. The number of examples in the parent dataset
	// labeled by the annotation spec.
	ExampleCount int32 `protobuf:"varint,9,opt,name=example_count,json=exampleCount,proto3" json:"example_count,omitempty"`
	// contains filtered or unexported fields
}

A definition of an annotation spec.

func (*AnnotationSpec) Descriptor

func (*AnnotationSpec) Descriptor() ([]byte, []int)

Deprecated: Use AnnotationSpec.ProtoReflect.Descriptor instead.

func (*AnnotationSpec) GetDisplayName

func (x *AnnotationSpec) GetDisplayName() string

func (*AnnotationSpec) GetExampleCount

func (x *AnnotationSpec) GetExampleCount() int32

func (*AnnotationSpec) GetName

func (x *AnnotationSpec) GetName() string

func (*AnnotationSpec) ProtoMessage

func (*AnnotationSpec) ProtoMessage()

func (*AnnotationSpec) ProtoReflect

func (x *AnnotationSpec) ProtoReflect() protoreflect.Message

func (*AnnotationSpec) Reset

func (x *AnnotationSpec) Reset()

func (*AnnotationSpec) String

func (x *AnnotationSpec) String() string

type ArrayStats

type ArrayStats struct {

	// Stats of all the values of all arrays, as if they were a single long
	// series of data. The type depends on the element type of the array.
	MemberStats *DataStats `protobuf:"bytes,2,opt,name=member_stats,json=memberStats,proto3" json:"member_stats,omitempty"`
	// contains filtered or unexported fields
}

The data statistics of a series of ARRAY values.

func (*ArrayStats) Descriptor

func (*ArrayStats) Descriptor() ([]byte, []int)

Deprecated: Use ArrayStats.ProtoReflect.Descriptor instead.

func (*ArrayStats) GetMemberStats

func (x *ArrayStats) GetMemberStats() *DataStats

func (*ArrayStats) ProtoMessage

func (*ArrayStats) ProtoMessage()

func (*ArrayStats) ProtoReflect

func (x *ArrayStats) ProtoReflect() protoreflect.Message

func (*ArrayStats) Reset

func (x *ArrayStats) Reset()

func (*ArrayStats) String

func (x *ArrayStats) String() string

type AutoMlClient

type AutoMlClient interface {
	// Creates a dataset.
	CreateDataset(ctx context.Context, in *CreateDatasetRequest, opts ...grpc.CallOption) (*Dataset, error)
	// Gets a dataset.
	GetDataset(ctx context.Context, in *GetDatasetRequest, opts ...grpc.CallOption) (*Dataset, error)
	// Lists datasets in a project.
	ListDatasets(ctx context.Context, in *ListDatasetsRequest, opts ...grpc.CallOption) (*ListDatasetsResponse, error)
	// Updates a dataset.
	UpdateDataset(ctx context.Context, in *UpdateDatasetRequest, opts ...grpc.CallOption) (*Dataset, error)
	// Deletes a dataset and all of its contents.
	// Returns empty response in the
	// [response][google.longrunning.Operation.response] field when it completes,
	// and `delete_details` in the
	// [metadata][google.longrunning.Operation.metadata] field.
	DeleteDataset(ctx context.Context, in *DeleteDatasetRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Imports data into a dataset.
	// For Tables this method can only be called on an empty Dataset.
	//
	// For Tables:
	// *   A
	// [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params]
	//     parameter must be explicitly set.
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	ImportData(ctx context.Context, in *ImportDataRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Exports dataset's data to the provided output location.
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	ExportData(ctx context.Context, in *ExportDataRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets an annotation spec.
	GetAnnotationSpec(ctx context.Context, in *GetAnnotationSpecRequest, opts ...grpc.CallOption) (*AnnotationSpec, error)
	// Gets a table spec.
	GetTableSpec(ctx context.Context, in *GetTableSpecRequest, opts ...grpc.CallOption) (*TableSpec, error)
	// Lists table specs in a dataset.
	ListTableSpecs(ctx context.Context, in *ListTableSpecsRequest, opts ...grpc.CallOption) (*ListTableSpecsResponse, error)
	// Updates a table spec.
	UpdateTableSpec(ctx context.Context, in *UpdateTableSpecRequest, opts ...grpc.CallOption) (*TableSpec, error)
	// Gets a column spec.
	GetColumnSpec(ctx context.Context, in *GetColumnSpecRequest, opts ...grpc.CallOption) (*ColumnSpec, error)
	// Lists column specs in a table spec.
	ListColumnSpecs(ctx context.Context, in *ListColumnSpecsRequest, opts ...grpc.CallOption) (*ListColumnSpecsResponse, error)
	// Updates a column spec.
	UpdateColumnSpec(ctx context.Context, in *UpdateColumnSpecRequest, opts ...grpc.CallOption) (*ColumnSpec, error)
	// Creates a model.
	// Returns a Model in the [response][google.longrunning.Operation.response]
	// field when it completes.
	// When you create a model, several model evaluations are created for it:
	// a global evaluation, and one evaluation for each annotation spec.
	CreateModel(ctx context.Context, in *CreateModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets a model.
	GetModel(ctx context.Context, in *GetModelRequest, opts ...grpc.CallOption) (*Model, error)
	// Lists models.
	ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error)
	// Deletes a model.
	// Returns `google.protobuf.Empty` in the
	// [response][google.longrunning.Operation.response] field when it completes,
	// and `delete_details` in the
	// [metadata][google.longrunning.Operation.metadata] field.
	DeleteModel(ctx context.Context, in *DeleteModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Deploys a model. If a model is already deployed, deploying it with the
	// same parameters has no effect. Deploying with different parameters
	// (for example, changing
	//
	// [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
	// will reset the deployment state without pausing the model's availability.
	//
	// Only applicable for Text Classification, Image Object Detection, Tables, and
	// Image Segmentation; all other domains manage
	// deployment automatically.
	//
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	DeployModel(ctx context.Context, in *DeployModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Undeploys a model. If the model is not deployed this method has no effect.
	//
	// Only applicable for Text Classification, Image Object Detection and Tables;
	// all other domains manage deployment automatically.
	//
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	UndeployModel(ctx context.Context, in *UndeployModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Exports a trained, "export-able" model to a user-specified Google Cloud
	// Storage location. A model is considered export-able if and only if it has
	// an export format defined for it in
	//
	// [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
	//
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	ExportModel(ctx context.Context, in *ExportModelRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Exports examples on which the model was evaluated (i.e. which were in the
	// TEST set of the dataset the model was created from), together with their
	// ground truth annotations and the annotations created (predicted) by the
	// model.
	// The examples, ground truth and predictions are exported in the state
	// they were at the moment the model was evaluated.
	//
	// This export is available only for 30 days after the model evaluation is
	// created.
	//
	// Currently only available for Tables.
	//
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	ExportEvaluatedExamples(ctx context.Context, in *ExportEvaluatedExamplesRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets a model evaluation.
	GetModelEvaluation(ctx context.Context, in *GetModelEvaluationRequest, opts ...grpc.CallOption) (*ModelEvaluation, error)
	// Lists model evaluations.
	ListModelEvaluations(ctx context.Context, in *ListModelEvaluationsRequest, opts ...grpc.CallOption) (*ListModelEvaluationsResponse, error)
}

AutoMlClient is the client API for AutoMl service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewAutoMlClient

func NewAutoMlClient(cc grpc.ClientConnInterface) AutoMlClient
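
A minimal sketch of building an AutoMlClient over a plain gRPC connection and fetching one dataset (the import path, the insecure dial option, and the GetDatasetRequest Name field are assumptions not documented on this page; real use goes through authenticated Cloud endpoints):

package example

import (
	"context"

	"google.golang.org/grpc"

	automlpb "google.golang.org/genproto/googleapis/cloud/automl/v1beta1" // assumed import path
)

// getDataset dials endpoint, constructs an AutoMlClient, and fetches the named dataset.
func getDataset(ctx context.Context, endpoint, datasetName string) (*automlpb.Dataset, error) {
	conn, err := grpc.Dial(endpoint, grpc.WithInsecure()) // production code needs TLS and credentials
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	client := automlpb.NewAutoMlClient(conn)
	return client.GetDataset(ctx, &automlpb.GetDatasetRequest{
		Name: datasetName, // assumed field: full resource name of the dataset
	})
}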

type AutoMlServer

type AutoMlServer interface {
	// Creates a dataset.
	CreateDataset(context.Context, *CreateDatasetRequest) (*Dataset, error)
	// Gets a dataset.
	GetDataset(context.Context, *GetDatasetRequest) (*Dataset, error)
	// Lists datasets in a project.
	ListDatasets(context.Context, *ListDatasetsRequest) (*ListDatasetsResponse, error)
	// Updates a dataset.
	UpdateDataset(context.Context, *UpdateDatasetRequest) (*Dataset, error)
	// Deletes a dataset and all of its contents.
	// Returns empty response in the
	// [response][google.longrunning.Operation.response] field when it completes,
	// and `delete_details` in the
	// [metadata][google.longrunning.Operation.metadata] field.
	DeleteDataset(context.Context, *DeleteDatasetRequest) (*longrunning.Operation, error)
	// Imports data into a dataset.
	// For Tables this method can only be called on an empty Dataset.
	//
	// For Tables:
	// *   A
	// [schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params]
	//     parameter must be explicitly set.
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	ImportData(context.Context, *ImportDataRequest) (*longrunning.Operation, error)
	// Exports dataset's data to the provided output location.
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	ExportData(context.Context, *ExportDataRequest) (*longrunning.Operation, error)
	// Gets an annotation spec.
	GetAnnotationSpec(context.Context, *GetAnnotationSpecRequest) (*AnnotationSpec, error)
	// Gets a table spec.
	GetTableSpec(context.Context, *GetTableSpecRequest) (*TableSpec, error)
	// Lists table specs in a dataset.
	ListTableSpecs(context.Context, *ListTableSpecsRequest) (*ListTableSpecsResponse, error)
	// Updates a table spec.
	UpdateTableSpec(context.Context, *UpdateTableSpecRequest) (*TableSpec, error)
	// Gets a column spec.
	GetColumnSpec(context.Context, *GetColumnSpecRequest) (*ColumnSpec, error)
	// Lists column specs in a table spec.
	ListColumnSpecs(context.Context, *ListColumnSpecsRequest) (*ListColumnSpecsResponse, error)
	// Updates a column spec.
	UpdateColumnSpec(context.Context, *UpdateColumnSpecRequest) (*ColumnSpec, error)
	// Creates a model.
	// Returns a Model in the [response][google.longrunning.Operation.response]
	// field when it completes.
	// When you create a model, several model evaluations are created for it:
	// a global evaluation, and one evaluation for each annotation spec.
	CreateModel(context.Context, *CreateModelRequest) (*longrunning.Operation, error)
	// Gets a model.
	GetModel(context.Context, *GetModelRequest) (*Model, error)
	// Lists models.
	ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error)
	// Deletes a model.
	// Returns `google.protobuf.Empty` in the
	// [response][google.longrunning.Operation.response] field when it completes,
	// and `delete_details` in the
	// [metadata][google.longrunning.Operation.metadata] field.
	DeleteModel(context.Context, *DeleteModelRequest) (*longrunning.Operation, error)
	// Deploys a model. If a model is already deployed, deploying it with the
	// same parameters has no effect. Deploying with different parameters
	// (for example, changing
	//
	// [node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
	// will reset the deployment state without pausing the model's availability.
	//
	// Only applicable for Text Classification, Image Object Detection, Tables, and
	// Image Segmentation; all other domains manage
	// deployment automatically.
	//
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	DeployModel(context.Context, *DeployModelRequest) (*longrunning.Operation, error)
	// Undeploys a model. If the model is not deployed this method has no effect.
	//
	// Only applicable for Text Classification, Image Object Detection and Tables;
	// all other domains manage deployment automatically.
	//
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	UndeployModel(context.Context, *UndeployModelRequest) (*longrunning.Operation, error)
	// Exports a trained, "export-able" model to a user-specified Google Cloud
	// Storage location. A model is considered export-able if and only if it has
	// an export format defined for it in
	//
	// [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
	//
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	ExportModel(context.Context, *ExportModelRequest) (*longrunning.Operation, error)
	// Exports examples on which the model was evaluated (i.e. which were in the
	// TEST set of the dataset the model was created from), together with their
	// ground truth annotations and the annotations created (predicted) by the
	// model.
	// The examples, ground truth and predictions are exported in the state
	// they were at the moment the model was evaluated.
	//
	// This export is available only for 30 days after the model evaluation is
	// created.
	//
	// Currently only available for Tables.
	//
	// Returns an empty response in the
	// [response][google.longrunning.Operation.response] field when it completes.
	ExportEvaluatedExamples(context.Context, *ExportEvaluatedExamplesRequest) (*longrunning.Operation, error)
	// Gets a model evaluation.
	GetModelEvaluation(context.Context, *GetModelEvaluationRequest) (*ModelEvaluation, error)
	// Lists model evaluations.
	ListModelEvaluations(context.Context, *ListModelEvaluationsRequest) (*ListModelEvaluationsResponse, error)
}

AutoMlServer is the server API for AutoMl service.

type BatchPredictInputConfig

type BatchPredictInputConfig struct {

	// Required. The source of the input.
	//
	// Types that are assignable to Source:
	//	*BatchPredictInputConfig_GcsSource
	//	*BatchPredictInputConfig_BigquerySource
	Source isBatchPredictInputConfig_Source `protobuf_oneof:"source"`
	// contains filtered or unexported fields
}

Input configuration for BatchPredict Action.

The format of the input depends on the ML problem of the model used for prediction. Unless specified otherwise, [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is expected as the input source.

The formats are represented in EBNF with commas being literal and with non-terminal symbols defined near the end of this comment. The formats are:

*  For Image Classification:
       CSV file(s) with each line having just a single column:
         GCS_FILE_PATH
         which leads to an image of up to 30MB in size. Supported
         extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in
         the Batch predict output.
       Three sample rows:
         gs://folder/image1.jpeg
         gs://folder/image2.gif
         gs://folder/image3.png

*  For Image Object Detection:
       CSV file(s) with each line having just a single column:
         GCS_FILE_PATH
         which leads to an image of up to 30MB in size. Supported
         extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in
         the Batch predict output.
       Three sample rows:
         gs://folder/image1.jpeg
         gs://folder/image2.gif
         gs://folder/image3.png
*  For Video Classification:
       CSV file(s) with each line in format:
         GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
         GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h
         duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
         TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
         length of the video, and end has to be after the start.
       Three sample rows:
         gs://folder/video1.mp4,10,40
         gs://folder/video1.mp4,20,60
         gs://folder/vid2.mov,0,inf

*  For Video Object Tracking:
       CSV file(s) with each line in format:
         GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
         GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h
         duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
         TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
         length of the video, and end has to be after the start.
       Three sample rows:
         gs://folder/video1.mp4,10,240
         gs://folder/video1.mp4,300,360
         gs://folder/vid2.mov,0,inf
*  For Text Classification:
       CSV file(s) with each line having just a single column:
         GCS_FILE_PATH | TEXT_SNIPPET
       Any given text file can be up to 128kB in size.
       Any given text snippet content must have 60,000 characters or less.
       Three sample rows:
         gs://folder/text1.txt
         "Some text content to predict"
         gs://folder/text3.pdf
       Supported file extensions: .txt, .pdf

*  For Text Sentiment:
       CSV file(s) with each line having just a single column:
         GCS_FILE_PATH | TEXT_SNIPPET
       Any given text file can be up to 128kB in size.
       Any given text snippet content must have 500 characters or less.
       Three sample rows:
         gs://folder/text1.txt
         "Some text content to predict"
         gs://folder/text3.pdf
       Supported file extensions: .txt, .pdf

* For Text Extraction
       .JSONL (i.e. JSON Lines) file(s) which either provide text in-line or
       as documents (for a single BatchPredict call only one of these
       formats may be used).
       The in-line .JSONL file(s) contain per line a proto that
         wraps a temporary user-assigned TextSnippet ID (string up to 2000
         characters long) called "id", a TextSnippet proto (in
         json representation) and zero or more TextFeature protos. Any given
         text snippet content must have 30,000 characters or less, and also
         be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
         unique.
       The document .JSONL file(s) contain, per line, a proto that wraps a
         Document proto with input_config set. Only PDF documents are
         supported now, and each document may be at most 2MB in size.
       Any given .JSONL file must be 100MB or smaller, and no more than 20
       files may be given.
       Sample in-line JSON Lines file (presented here with artificial line
       breaks, but the only actual line break is denoted by \n):
         {
           "id": "my_first_id",
           "text_snippet": { "content": "dog car cat"},
           "text_features": [
             {
               "text_segment": {"start_offset": 4, "end_offset": 6},
               "structural_type": PARAGRAPH,
               "bounding_poly": {
                 "normalized_vertices": [
                   {"x": 0.1, "y": 0.1},
                   {"x": 0.1, "y": 0.3},
                   {"x": 0.3, "y": 0.3},
                   {"x": 0.3, "y": 0.1},
                 ]
               },
             }
           ],
         }\n
         {
           "id": "2",
           "text_snippet": {
             "content": "An elaborate content",
             "mime_type": "text/plain"
           }
         }
       Sample document JSON Lines file (presented here with artificial line
       breaks, but the only actual line break is denoted by \n):
         {
           "document": {
             "input_config": {
               "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
               }
             }
           }
         }\n
         {
           "document": {
             "input_config": {
               "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
               }
             }
           }
         }

*  For Tables:
       Either
       [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or

[bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source].

GCS case:
  CSV file(s), each by itself 10GB or smaller and with a total size of
  100GB or smaller, where the first file must have a header containing
  column names. If the first row of a subsequent file is the same as
  the header, then it is also treated as a header. All other rows
  contain values for the corresponding columns.
  The column names must contain the model's

[input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]

[display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]

(order doesn't matter). The columns corresponding to the model's
input feature column specs must contain values compatible with the
column spec's data types. Prediction on all the rows, i.e. the CSV
lines, will be attempted. For FORECASTING

[prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:

all columns having

[TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType]

type will be ignored.
First three sample rows of a CSV file:
  "First Name","Last Name","Dob","Addresses"

"John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"

"Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}

BigQuery case:
  A URI of a BigQuery table. The user data size of the BigQuery
  table must be 100GB or smaller.
  The column names must contain the model's

[input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]

[display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]

(order doesn't matter). The columns corresponding to the model's
input feature column specs must contain values compatible with the
column spec's data types. Prediction on all the rows of the table
will be attempted. For FORECASTING

[prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:

all columns having

[TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType]

         type will be ignored.

Definitions:
GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/video.avi".
TEXT_SNIPPET = The content of a text snippet, UTF-8 encoded, enclosed within
               double quotes ("")
TIME_SEGMENT_START = TIME_OFFSET
                     Expresses a beginning, inclusive, of a time segment
                     within an
                     example that has a time dimension (e.g. video).
TIME_SEGMENT_END = TIME_OFFSET
                   Expresses an end, exclusive, of a time segment within
                   an example that has a time dimension (e.g. video).
TIME_OFFSET = A number of seconds as measured from the start of an
              example (e.g. video). Fractions are allowed, up to
              microsecond precision. "inf" is allowed, and it means the end
              of the example.

Errors:
If any of the provided CSV files can't be parsed, or if more than a certain
percentage of CSV rows cannot be processed, then the operation fails and
prediction does not happen. Regardless of overall success or failure, the
per-row failures, up to a certain count cap, will be listed in
Operation.metadata.partial_failures.
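
A minimal sketch of constructing a BatchPredictInputConfig that points at CSV input on Cloud Storage, using the GcsSource wrapper listed below (the import path and the InputUris field name on GcsSource are assumptions not documented on this page):

package example

import (
	automlpb "google.golang.org/genproto/googleapis/cloud/automl/v1beta1" // assumed import path
)

// csvInputConfig wraps one or more gs:// CSV paths as a batch prediction input.
func csvInputConfig(uris []string) *automlpb.BatchPredictInputConfig {
	return &automlpb.BatchPredictInputConfig{
		Source: &automlpb.BatchPredictInputConfig_GcsSource{
			GcsSource: &automlpb.GcsSource{
				InputUris: uris, // assumed field name on GcsSource
			},
		},
	}
}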

func (*BatchPredictInputConfig) Descriptor

func (*BatchPredictInputConfig) Descriptor() ([]byte, []int)

Deprecated: Use BatchPredictInputConfig.ProtoReflect.Descriptor instead.

func (*BatchPredictInputConfig) GetBigquerySource

func (x *BatchPredictInputConfig) GetBigquerySource() *BigQuerySource

func (*BatchPredictInputConfig) GetGcsSource

func (x *BatchPredictInputConfig) GetGcsSource() *GcsSource

func (*BatchPredictInputConfig) GetSource

func (m *BatchPredictInputConfig) GetSource() isBatchPredictInputConfig_Source

func (*BatchPredictInputConfig) ProtoMessage

func (*BatchPredictInputConfig) ProtoMessage()

func (*BatchPredictInputConfig) ProtoReflect

func (x *BatchPredictInputConfig) ProtoReflect() protoreflect.Message

func (*BatchPredictInputConfig) Reset

func (x *BatchPredictInputConfig) Reset()

func (*BatchPredictInputConfig) String

func (x *BatchPredictInputConfig) String() string

type BatchPredictInputConfig_BigquerySource

type BatchPredictInputConfig_BigquerySource struct {
	// The BigQuery location for the input content.
	BigquerySource *BigQuerySource `protobuf:"bytes,2,opt,name=bigquery_source,json=bigquerySource,proto3,oneof"`
}

type BatchPredictInputConfig_GcsSource

type BatchPredictInputConfig_GcsSource struct {
	// The Google Cloud Storage location for the input content.
	GcsSource *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource,proto3,oneof"`
}

type BatchPredictOperationMetadata

type BatchPredictOperationMetadata struct {

	// Output only. The input config that was given upon starting this
	// batch predict operation.
	InputConfig *BatchPredictInputConfig `protobuf:"bytes,1,opt,name=input_config,json=inputConfig,proto3" json:"input_config,omitempty"`
	// Output only. Information further describing this batch predict's output.
	OutputInfo *BatchPredictOperationMetadata_BatchPredictOutputInfo `protobuf:"bytes,2,opt,name=output_info,json=outputInfo,proto3" json:"output_info,omitempty"`
	// contains filtered or unexported fields
}

Details of BatchPredict operation.

func (*BatchPredictOperationMetadata) Descriptor

func (*BatchPredictOperationMetadata) Descriptor() ([]byte, []int)

Deprecated: Use BatchPredictOperationMetadata.ProtoReflect.Descriptor instead.

func (*BatchPredictOperationMetadata) GetInputConfig

func (*BatchPredictOperationMetadata) GetOutputInfo

func (*BatchPredictOperationMetadata) ProtoMessage

func (*BatchPredictOperationMetadata) ProtoMessage()

func (*BatchPredictOperationMetadata) ProtoReflect

func (*BatchPredictOperationMetadata) Reset

func (x *BatchPredictOperationMetadata) Reset()

func (*BatchPredictOperationMetadata) String

type BatchPredictOperationMetadata_BatchPredictOutputInfo

type BatchPredictOperationMetadata_BatchPredictOutputInfo struct {

	// The output location into which prediction output is written.
	//
	// Types that are assignable to OutputLocation:
	//	*BatchPredictOperationMetadata_BatchPredictOutputInfo_GcsOutputDirectory
	//	*BatchPredictOperationMetadata_BatchPredictOutputInfo_BigqueryOutputDataset
	OutputLocation isBatchPredictOperationMetadata_BatchPredictOutputInfo_OutputLocation `protobuf_oneof:"output_location"`
	// contains filtered or unexported fields
}

Further describes this batch predict's output. Supplements

[BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig].

func (*BatchPredictOperationMetadata_BatchPredictOutputInfo) Descriptor

Deprecated: Use BatchPredictOperationMetadata_BatchPredictOutputInfo.ProtoReflect.Descriptor instead.

func (*BatchPredictOperationMetadata_BatchPredictOutputInfo) GetBigqueryOutputDataset

func (x *BatchPredictOperationMetadata_BatchPredictOutputInfo) GetBigqueryOutputDataset() string

func (*BatchPredictOperationMetadata_BatchPredictOutputInfo) GetGcsOutputDirectory

func (*BatchPredictOperationMetadata_BatchPredictOutputInfo) GetOutputLocation

func (m *BatchPredictOperationMetadata_BatchPredictOutputInfo) GetOutputLocation() isBatchPredictOperationMetadata_BatchPredictOutputInfo_OutputLocation

func (*BatchPredictOperationMetadata_BatchPredictOutputInfo) ProtoMessage

func (*BatchPredictOperationMetadata_BatchPredictOutputInfo) ProtoReflect

func (*BatchPredictOperationMetadata_BatchPredictOutputInfo) Reset

func (*BatchPredictOperationMetadata_BatchPredictOutputInfo) String

type BatchPredictOperationMetadata_BatchPredictOutputInfo_BigqueryOutputDataset

type BatchPredictOperationMetadata_BatchPredictOutputInfo_BigqueryOutputDataset struct {
	// The path of the BigQuery dataset created, in bq://projectId.bqDatasetId
	// format, into which the prediction output is written.
	BigqueryOutputDataset string `protobuf:"bytes,2,opt,name=bigquery_output_dataset,json=bigqueryOutputDataset,proto3,oneof"`
}

type BatchPredictOperationMetadata_BatchPredictOutputInfo_GcsOutputDirectory

type BatchPredictOperationMetadata_BatchPredictOutputInfo_GcsOutputDirectory struct {
	// The full path of the Google Cloud Storage directory created, into which
	// the prediction output is written.
	GcsOutputDirectory string `protobuf:"bytes,1,opt,name=gcs_output_directory,json=gcsOutputDirectory,proto3,oneof"`
}

type BatchPredictOutputConfig

type BatchPredictOutputConfig struct {

	// Required. The destination of the output.
	//
	// Types that are assignable to Destination:
	//	*BatchPredictOutputConfig_GcsDestination
	//	*BatchPredictOutputConfig_BigqueryDestination
	Destination isBatchPredictOutputConfig_Destination `protobuf_oneof:"destination"`
	// contains filtered or unexported fields
}

Output configuration for BatchPredict Action.

As destination the

[gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] must be set unless specified otherwise for a domain. If gcs_destination is set, then a new directory is created inside the given directory. Its name will be "prediction-<model-display-name>-<timestamp-of-prediction-call>", where the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Its contents depend on the ML problem the predictions are made for.

*  For Image Classification:
       In the created directory files `image_classification_1.jsonl`,
       `image_classification_2.jsonl`,...,`image_classification_N.jsonl`
       will be created, where N may be 1, and depends on the
       total number of the successfully predicted images and annotations.
       A single image will be listed only once with all its annotations,
       and its annotations will never be split across files.
       Each .JSONL file will contain, per line, a JSON representation of a
       proto that wraps image's "ID" : "<id_value>" followed by a list of
       zero or more AnnotationPayload protos (called annotations), which
       have classification detail populated.
       If prediction for any image failed (partially or completely), then
       additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
       files will be created (N depends on total number of failed
       predictions). These files will have a JSON representation of a proto
       that wraps the same "ID" : "<id_value>" but here followed by
       exactly one

[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)

       containing only `code` and `message` fields.

*  For Image Object Detection:
       In the created directory files `image_object_detection_1.jsonl`,
       `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl`
       will be created, where N may be 1, and depends on the
       total number of the successfully predicted images and annotations.
       Each .JSONL file will contain, per line, a JSON representation of a
       proto that wraps image's "ID" : "<id_value>" followed by a list of
       zero or more AnnotationPayload protos (called annotations), which
       have image_object_detection detail populated. A single image will
       be listed only once with all its annotations, and its annotations
       will never be split across files.
       If prediction for any image failed (partially or completely), then
       additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
       files will be created (N depends on total number of failed
       predictions). These files will have a JSON representation of a proto
       that wraps the same "ID" : "<id_value>" but here followed by
       exactly one

[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)

       containing only `code` and `message` fields.
*  For Video Classification:
       In the created directory a video_classification.csv file, and a .JSON
       file per each video classification requested in the input (i.e. each
       line in given CSV(s)), will be created.

       The format of video_classification.csv is:

GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS

       where:
       GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
           the prediction input lines (i.e. video_classification.csv has
           precisely the same number of lines as the prediction input had.)
       JSON_FILE_NAME = Name of .JSON file in the output directory, which
           contains prediction responses for the video time segment.
       STATUS = "OK" if prediction completed successfully, or an error code
           with message otherwise. If STATUS is not "OK" then the .JSON file
           for that line may not exist or be empty.

       Each .JSON file, assuming STATUS is "OK", will contain a list of
       AnnotationPayload protos in JSON format, which are the predictions
       for the video time segment the file is assigned to in the
       video_classification.csv. All AnnotationPayload protos will have
       video_classification field set, and will be sorted by
       video_classification.type field (note that the returned types are
       governed by `classifaction_types` parameter in
       [PredictService.BatchPredictRequest.params][]).

*  For Video Object Tracking:
       In the created directory a video_object_tracking.csv file will be
       created, and multiple files video_object_tracking_1.json,
       video_object_tracking_2.json,..., video_object_tracking_N.json,
       where N is the number of requests in the input (i.e. the number of
       lines in given CSV(s)).

       The format of video_object_tracking.csv is:

GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS

       where:
       GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
           the prediction input lines (i.e. video_object_tracking.csv has
           precisely the same number of lines as the prediction input had.)
       JSON_FILE_NAME = Name of .JSON file in the output directory, which
           contains prediction responses for the video time segment.
       STATUS = "OK" if prediction completed successfully, or an error
           code with message otherwise. If STATUS is not "OK" then the .JSON
           file for that line may not exist or be empty.

       Each .JSON file, assuming STATUS is "OK", will contain a list of
       AnnotationPayload protos in JSON format, which are the predictions
       for each frame of the video time segment the file is assigned to in
       video_object_tracking.csv. All AnnotationPayload protos will have
       video_object_tracking field set.
*  For Text Classification:
       In the created directory files `text_classification_1.jsonl`,
       `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
       will be created, where N may be 1, and depends on the
       total number of inputs and annotations found.

       Each .JSONL file will contain, per line, a JSON representation of a
       proto that wraps input text snippet or input text file and a list of
       zero or more AnnotationPayload protos (called annotations), which
       have classification detail populated. A single text snippet or file
       will be listed only once with all its annotations, and its
       annotations will never be split across files.

       If prediction for any text snippet or file failed (partially or
       completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
       `errors_N.jsonl` files will be created (N depends on total number of
       failed predictions). These files will have a JSON representation of a
       proto that wraps input text snippet or input text file followed by
       exactly one

[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)

       containing only `code` and `message`.

*  For Text Sentiment:
       In the created directory files `text_sentiment_1.jsonl`,
       `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl`
       will be created, where N may be 1, and depends on the
       total number of inputs and annotations found.

       Each .JSONL file will contain, per line, a JSON representation of a
       proto that wraps input text snippet or input text file and a list of
       zero or more AnnotationPayload protos (called annotations), which
       have text_sentiment detail populated. A single text snippet or file
       will be listed only once with all its annotations, and its
       annotations will never be split across files.

       If prediction for any text snippet or file failed (partially or
       completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
       `errors_N.jsonl` files will be created (N depends on total number of
       failed predictions). These files will have a JSON representation of a
       proto that wraps input text snippet or input text file followed by
       exactly one

[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)

      containing only `code` and `message`.

*  For Text Extraction:
      In the created directory files `text_extraction_1.jsonl`,
      `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl`
      will be created, where N may be 1, and depends on the
      total number of inputs and annotations found.
      The contents of these .JSONL file(s) depend on whether the input
      used inline text, or documents.
      If input was inline, then each .JSONL file will contain, per line,
        a JSON representation of a proto that wraps given in request text
        snippet's "id" (if specified), followed by input text snippet,
        and a list of zero or more
        AnnotationPayload protos (called annotations), which have
        text_extraction detail populated. A single text snippet will be
        listed only once with all its annotations, and its annotations will
        never be split across files.
      If input used documents, then each .JSONL file will contain, per
        line, a JSON representation of a proto that wraps given in request
        document proto, followed by its OCR-ed representation in the form
        of a text snippet, finally followed by a list of zero or more
        AnnotationPayload protos (called annotations), which have
        text_extraction detail populated and refer, via their indices, to
        the OCR-ed text snippet. A single document (and its text snippet)
        will be listed only once with all its annotations, and its
        annotations will never be split across files.
      If prediction for any text snippet failed (partially or completely),
      then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
      `errors_N.jsonl` files will be created (N depends on total number of
      failed predictions). These files will have a JSON representation of a
      proto that wraps either the "id" : "<id_value>" (in case of inline)
      or the document proto (in case of document) but here followed by
      exactly one

[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)

       containing only `code` and `message`.

*  For Tables:
       Output depends on whether

[gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination]

or

[bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination]

is set (either is allowed).
GCS case:
  In the created directory files `tables_1.csv`, `tables_2.csv`,...,
  `tables_N.csv` will be created, where N may be 1, and depends on
  the total number of the successfully predicted rows.
  For all CLASSIFICATION

[prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:

Each .csv file will contain a header, listing all columns'

[display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]

given on input followed by M target column names in the format of

"<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]

[display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>_<target

  value>_score" where M is the number of distinct target values,
  i.e. number of distinct values in the target column of the table
  used to train the model. Subsequent lines will contain the
  respective values of successfully predicted rows, with the last,
  i.e. the target, columns having the corresponding prediction
  [scores][google.cloud.automl.v1beta1.TablesAnnotation.score].
For REGRESSION and FORECASTING

[prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:

Each .csv file will contain a header, listing all columns'
[display_name-s][google.cloud.automl.v1beta1.display_name] given
on input followed by the predicted target column with name in the
format of

"predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]

[display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>"

Subsequent lines will contain the respective values of
successfully predicted rows, with the last, i.e. the target,
column having the predicted target value.
If prediction for any rows failed, then an additional
`errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be
created (N depends on total number of failed rows). These files
will have analogous format as `tables_*.csv`, but always with a
single target column having

[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)

    represented as a JSON string, and containing only `code` and
    `message`.
BigQuery case:

[bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]

pointing to a BigQuery project must be set. In the given project a
new dataset will be created with name
`prediction_<model-display-name>_<timestamp-of-prediction-call>`
where <model-display-name> will be made
BigQuery-dataset-name compatible (e.g. most special characters will
become underscores), and timestamp will be in
YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
two tables will be created, `predictions`, and `errors`.
The `predictions` table's column names will be the input columns'

[display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]

followed by the target column with name in the format of

"predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]

[display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>"

The input feature columns will contain the respective values of
successfully predicted rows, with the target column having an
ARRAY of

[AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload],

represented as STRUCT-s, containing
[TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation].
The `errors` table contains rows for which the prediction has
failed; it has analogous input columns, while the target column name
is in the format of

"errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]

[display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>",

and as a value has

[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)

represented as a STRUCT, and containing only `code` and `message`.
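
A minimal sketch of constructing a BatchPredictOutputConfig that writes results under a Cloud Storage directory, using the GcsDestination wrapper listed below (the import path and the OutputUriPrefix field name on GcsDestination are assumptions not documented on this page):

package example

import (
	automlpb "google.golang.org/genproto/googleapis/cloud/automl/v1beta1" // assumed import path
)

// gcsOutputConfig directs batch prediction output under the given gs:// directory.
func gcsOutputConfig(outputURIPrefix string) *automlpb.BatchPredictOutputConfig {
	return &automlpb.BatchPredictOutputConfig{
		Destination: &automlpb.BatchPredictOutputConfig_GcsDestination{
			GcsDestination: &automlpb.GcsDestination{
				OutputUriPrefix: outputURIPrefix, // assumed field name on GcsDestination
			},
		},
	}
}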

func (*BatchPredictOutputConfig) Descriptor

func (*BatchPredictOutputConfig) Descriptor() ([]byte, []int)

Deprecated: Use BatchPredictOutputConfig.ProtoReflect.Descriptor instead.

func (*BatchPredictOutputConfig) GetBigqueryDestination

func (x *BatchPredictOutputConfig) GetBigqueryDestination() *BigQueryDestination

func (*BatchPredictOutputConfig) GetDestination

func (m *BatchPredictOutputConfig) GetDestination() isBatchPredictOutputConfig_Destination

func (*BatchPredictOutputConfig) GetGcsDestination

func (x *BatchPredictOutputConfig) GetGcsDestination() *GcsDestination

func (*BatchPredictOutputConfig) ProtoMessage

func (*BatchPredictOutputConfig) ProtoMessage()

func (*BatchPredictOutputConfig) ProtoReflect

func (x *BatchPredictOutputConfig) ProtoReflect() protoreflect.Message

func (*BatchPredictOutputConfig) Reset

func (x *BatchPredictOutputConfig) Reset()

func (*BatchPredictOutputConfig) String

func (x *BatchPredictOutputConfig) String() string

type BatchPredictOutputConfig_BigqueryDestination

type BatchPredictOutputConfig_BigqueryDestination struct {
	// The BigQuery location where the output is to be written to.
	BigqueryDestination *BigQueryDestination `protobuf:"bytes,2,opt,name=bigquery_destination,json=bigqueryDestination,proto3,oneof"`
}

type BatchPredictOutputConfig_GcsDestination

type BatchPredictOutputConfig_GcsDestination struct {
	// The Google Cloud Storage location of the directory where the output is to
	// be written to.
	GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination,proto3,oneof"`
}

type BatchPredictRequest

type BatchPredictRequest struct {

	// Required. Name of the model requested to serve the batch prediction.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. The input configuration for batch prediction.
	InputConfig *BatchPredictInputConfig `protobuf:"bytes,3,opt,name=input_config,json=inputConfig,proto3" json:"input_config,omitempty"`
	// Required. The configuration specifying where output predictions should
	// be written.
	OutputConfig *BatchPredictOutputConfig `protobuf:"bytes,4,opt,name=output_config,json=outputConfig,proto3" json:"output_config,omitempty"`
	// Required. Additional domain-specific parameters for the predictions; any string must
	// be up to 25000 characters long.
	//
	// *  For Text Classification:
	//
	//    `score_threshold` - (float) A value from 0.0 to 1.0. When the model
	//         makes predictions for a text snippet, it will only produce results
	//         that have at least this confidence score. The default is 0.5.
	//
	// *  For Image Classification:
	//
	//    `score_threshold` - (float) A value from 0.0 to 1.0. When the model
	//         makes predictions for an image, it will only produce results that
	//         have at least this confidence score. The default is 0.5.
	//
	// *  For Image Object Detection:
	//
	//    `score_threshold` - (float) When Model detects objects on the image,
	//        it will only produce bounding boxes which have at least this
	//        confidence score. Value in 0 to 1 range, default is 0.5.
	//    `max_bounding_box_count` - (int64) No more than this number of bounding
	//        boxes will be produced per image. Default is 100, the
	//        requested value may be limited by server.
	//
	// *  For Video Classification:
	//
	//    `score_threshold` - (float) A value from 0.0 to 1.0. When the model
	//        makes predictions for a video, it will only produce results that
	//        have at least this confidence score. The default is 0.5.
	//    `segment_classification` - (boolean) Set to true to request
	//        segment-level classification. AutoML Video Intelligence returns
	//        labels and their confidence scores for the entire segment of the
	//        video that user specified in the request configuration.
	//        The default is "true".
	//    `shot_classification` - (boolean) Set to true to request shot-level
	//        classification. AutoML Video Intelligence determines the boundaries
	//        for each camera shot in the entire segment of the video that user
	//        specified in the request configuration. AutoML Video Intelligence
	//        then returns labels and their confidence scores for each detected
	//        shot, along with the start and end time of the shot.
	//        WARNING: Model evaluation is not done for this classification type,
	//        the quality of it depends on training data, but there are no metrics
	//        provided to describe that quality. The default is "false".
	//    `1s_interval_classification` - (boolean) Set to true to request
	//        classification for a video at one-second intervals. AutoML Video
	//        Intelligence returns labels and their confidence scores for each
	//        second of the entire segment of the video that user specified in the
	//        request configuration.
	//        WARNING: Model evaluation is not done for this classification
	//        type, the quality of it depends on training data, but there are no
	//        metrics provided to describe that quality. The default is
	//        "false".
	//
	// *  For Tables:
	//
	//    `feature_importance` - (boolean) Whether feature importance
	//        should be populated in the returned TablesAnnotations. The
	//        default is false.
	//
	// *  For Video Object Tracking:
	//
	//    `score_threshold` - (float) When Model detects objects on video frames,
	//        it will only produce bounding boxes which have at least this
	//        confidence score. Value in 0 to 1 range, default is 0.5.
	//    `max_bounding_box_count` - (int64) No more than this number of bounding
	//        boxes will be returned per frame. Default is 100, the requested
	//        value may be limited by server.
	//    `min_bounding_box_size` - (float) Only bounding boxes whose shortest
	//      edge is at least this long, as a relative value of the video frame
	//      size, will be returned. Value in 0 to 1 range. Default is 0.
	Params map[string]string `` /* 153-byte string literal not displayed */
	// contains filtered or unexported fields
}

Request message for [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
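
A minimal sketch of assembling this request for a classification model, assuming the package is imported as automlpb; the helper name is illustrative and the input and output configs are assumed to be built separately:

// buildBatchPredictRequest is a hypothetical helper.
func buildBatchPredictRequest(modelName string, in *automlpb.BatchPredictInputConfig, out *automlpb.BatchPredictOutputConfig) *automlpb.BatchPredictRequest {
	return &automlpb.BatchPredictRequest{
		// Full resource name of the model to serve the prediction, e.g.
		// "projects/<project>/locations/<location>/models/<model-id>".
		Name:         modelName,
		InputConfig:  in,
		OutputConfig: out,
		Params: map[string]string{
			// Only return results with at least this confidence score.
			"score_threshold": "0.8",
		},
	}
}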

func (*BatchPredictRequest) Descriptor

func (*BatchPredictRequest) Descriptor() ([]byte, []int)

Deprecated: Use BatchPredictRequest.ProtoReflect.Descriptor instead.

func (*BatchPredictRequest) GetInputConfig

func (x *BatchPredictRequest) GetInputConfig() *BatchPredictInputConfig

func (*BatchPredictRequest) GetName

func (x *BatchPredictRequest) GetName() string

func (*BatchPredictRequest) GetOutputConfig

func (x *BatchPredictRequest) GetOutputConfig() *BatchPredictOutputConfig

func (*BatchPredictRequest) GetParams

func (x *BatchPredictRequest) GetParams() map[string]string

func (*BatchPredictRequest) ProtoMessage

func (*BatchPredictRequest) ProtoMessage()

func (*BatchPredictRequest) ProtoReflect

func (x *BatchPredictRequest) ProtoReflect() protoreflect.Message

func (*BatchPredictRequest) Reset

func (x *BatchPredictRequest) Reset()

func (*BatchPredictRequest) String

func (x *BatchPredictRequest) String() string

type BatchPredictResult

type BatchPredictResult struct {

	// Additional domain-specific prediction response metadata.
	//
	// *  For Image Object Detection:
	//  `max_bounding_box_count` - (int64) At most that many bounding boxes per
	//      image could have been returned.
	//
	// *  For Video Object Tracking:
	//  `max_bounding_box_count` - (int64) At most that many bounding boxes per
	//      frame could have been returned.
	Metadata map[string]string `` /* 157-byte string literal not displayed */
	// contains filtered or unexported fields
}

Result of the Batch Predict. This message is returned in [response][google.longrunning.Operation.response] of the operation returned by the [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
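
For object detection and tracking models, the reported cap can be read back from the metadata roughly as follows (a sketch; automlpb and strconv are assumed imported, and the helper name is illustrative):

// maxBoundingBoxCount is a hypothetical helper extracting the
// "max_bounding_box_count" entry from the result metadata.
func maxBoundingBoxCount(res *automlpb.BatchPredictResult) (int64, bool) {
	v, ok := res.GetMetadata()["max_bounding_box_count"]
	if !ok {
		return 0, false
	}
	n, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return 0, false
	}
	return n, true
}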

func (*BatchPredictResult) Descriptor

func (*BatchPredictResult) Descriptor() ([]byte, []int)

Deprecated: Use BatchPredictResult.ProtoReflect.Descriptor instead.

func (*BatchPredictResult) GetMetadata

func (x *BatchPredictResult) GetMetadata() map[string]string

func (*BatchPredictResult) ProtoMessage

func (*BatchPredictResult) ProtoMessage()

func (*BatchPredictResult) ProtoReflect

func (x *BatchPredictResult) ProtoReflect() protoreflect.Message

func (*BatchPredictResult) Reset

func (x *BatchPredictResult) Reset()

func (*BatchPredictResult) String

func (x *BatchPredictResult) String() string

type BigQueryDestination

type BigQueryDestination struct {

	// Required. BigQuery URI to a project, up to 2000 characters long.
	// Accepted forms:
	// *  BigQuery path e.g. bq://projectId
	OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
	// contains filtered or unexported fields
}

The BigQuery location for the output content.

func (*BigQueryDestination) Descriptor

func (*BigQueryDestination) Descriptor() ([]byte, []int)

Deprecated: Use BigQueryDestination.ProtoReflect.Descriptor instead.

func (*BigQueryDestination) GetOutputUri

func (x *BigQueryDestination) GetOutputUri() string

func (*BigQueryDestination) ProtoMessage

func (*BigQueryDestination) ProtoMessage()

func (*BigQueryDestination) ProtoReflect

func (x *BigQueryDestination) ProtoReflect() protoreflect.Message

func (*BigQueryDestination) Reset

func (x *BigQueryDestination) Reset()

func (*BigQueryDestination) String

func (x *BigQueryDestination) String() string

type BigQuerySource

type BigQuerySource struct {

	// Required. BigQuery URI to a table, up to 2000 characters long.
	// Accepted forms:
	// *  BigQuery path e.g. bq://projectId.bqDatasetId.bqTableId
	InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
	// contains filtered or unexported fields
}

The BigQuery location for the input content.
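
A short sketch of the accepted URI forms for the two BigQuery messages above (project, dataset, and table names are placeholders; automlpb is the assumed import alias):

// exampleBigQueryURIs builds placeholder source and destination values that
// follow the accepted forms documented on BigQuerySource and BigQueryDestination.
func exampleBigQueryURIs() (*automlpb.BigQuerySource, *automlpb.BigQueryDestination) {
	src := &automlpb.BigQuerySource{InputUri: "bq://myProject.myDataset.myTable"}
	dst := &automlpb.BigQueryDestination{OutputUri: "bq://myProject"}
	return src, dst
}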

func (*BigQuerySource) Descriptor

func (*BigQuerySource) Descriptor() ([]byte, []int)

Deprecated: Use BigQuerySource.ProtoReflect.Descriptor instead.

func (*BigQuerySource) GetInputUri

func (x *BigQuerySource) GetInputUri() string

func (*BigQuerySource) ProtoMessage

func (*BigQuerySource) ProtoMessage()

func (*BigQuerySource) ProtoReflect

func (x *BigQuerySource) ProtoReflect() protoreflect.Message

func (*BigQuerySource) Reset

func (x *BigQuerySource) Reset()

func (*BigQuerySource) String

func (x *BigQuerySource) String() string

type BoundingBoxMetricsEntry

type BoundingBoxMetricsEntry struct {

	// Output only. The intersection-over-union threshold value used to compute
	// this metrics entry.
	IouThreshold float32 `protobuf:"fixed32,1,opt,name=iou_threshold,json=iouThreshold,proto3" json:"iou_threshold,omitempty"`
	// Output only. The mean average precision, most often close to au_prc.
	MeanAveragePrecision float32 `protobuf:"fixed32,2,opt,name=mean_average_precision,json=meanAveragePrecision,proto3" json:"mean_average_precision,omitempty"`
	// Output only. Metrics for each label-match confidence_threshold from
	// 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is
	// derived from them.
	ConfidenceMetricsEntries []*BoundingBoxMetricsEntry_ConfidenceMetricsEntry `` /* 135-byte string literal not displayed */
	// contains filtered or unexported fields
}

Bounding box matching model metrics for a single intersection-over-union threshold and multiple label match confidence thresholds.
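
A sketch of scanning the per-threshold entries of such a metrics entry, for example to pick the confidence threshold with the highest F1 score (helper name illustrative; automlpb is the assumed import alias):

// bestOperatingPoint walks the documented confidence thresholds
// (0.05, 0.10, ..., 0.99) and returns the entry with the highest F1 score.
func bestOperatingPoint(e *automlpb.BoundingBoxMetricsEntry) *automlpb.BoundingBoxMetricsEntry_ConfidenceMetricsEntry {
	var best *automlpb.BoundingBoxMetricsEntry_ConfidenceMetricsEntry
	for _, c := range e.GetConfidenceMetricsEntries() {
		if best == nil || c.GetF1Score() > best.GetF1Score() {
			best = c
		}
	}
	return best
}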

func (*BoundingBoxMetricsEntry) Descriptor

func (*BoundingBoxMetricsEntry) Descriptor() ([]byte, []int)

Deprecated: Use BoundingBoxMetricsEntry.ProtoReflect.Descriptor instead.

func (*BoundingBoxMetricsEntry) GetConfidenceMetricsEntries

func (x *BoundingBoxMetricsEntry) GetConfidenceMetricsEntries() []*BoundingBoxMetricsEntry_ConfidenceMetricsEntry

func (*BoundingBoxMetricsEntry) GetIouThreshold

func (x *BoundingBoxMetricsEntry) GetIouThreshold() float32

func (*BoundingBoxMetricsEntry) GetMeanAveragePrecision

func (x *BoundingBoxMetricsEntry) GetMeanAveragePrecision() float32

func (*BoundingBoxMetricsEntry) ProtoMessage

func (*BoundingBoxMetricsEntry) ProtoMessage()

func (*BoundingBoxMetricsEntry) ProtoReflect

func (x *BoundingBoxMetricsEntry) ProtoReflect() protoreflect.Message

func (*BoundingBoxMetricsEntry) Reset

func (x *BoundingBoxMetricsEntry) Reset()

func (*BoundingBoxMetricsEntry) String

func (x *BoundingBoxMetricsEntry) String() string

type BoundingBoxMetricsEntry_ConfidenceMetricsEntry

type BoundingBoxMetricsEntry_ConfidenceMetricsEntry struct {

	// Output only. The confidence threshold value used to compute the metrics.
	ConfidenceThreshold float32 `protobuf:"fixed32,1,opt,name=confidence_threshold,json=confidenceThreshold,proto3" json:"confidence_threshold,omitempty"`
	// Output only. Recall under the given confidence threshold.
	Recall float32 `protobuf:"fixed32,2,opt,name=recall,proto3" json:"recall,omitempty"`
	// Output only. Precision under the given confidence threshold.
	Precision float32 `protobuf:"fixed32,3,opt,name=precision,proto3" json:"precision,omitempty"`
	// Output only. The harmonic mean of recall and precision.
	F1Score float32 `protobuf:"fixed32,4,opt,name=f1_score,json=f1Score,proto3" json:"f1_score,omitempty"`
	// contains filtered or unexported fields
}

Metrics for a single confidence threshold.
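
The f1_score field above is documented as the harmonic mean of recall and precision; as a quick sketch of that relationship:

// harmonicMean reproduces the documented relationship
// f1 = 2 * precision * recall / (precision + recall).
func harmonicMean(precision, recall float32) float32 {
	if precision+recall == 0 {
		return 0
	}
	return 2 * precision * recall / (precision + recall)
}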

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) Descriptor

Deprecated: Use BoundingBoxMetricsEntry_ConfidenceMetricsEntry.ProtoReflect.Descriptor instead.

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) GetConfidenceThreshold

func (x *BoundingBoxMetricsEntry_ConfidenceMetricsEntry) GetConfidenceThreshold() float32

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) GetF1Score

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) GetPrecision

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) GetRecall

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) ProtoMessage

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) ProtoReflect

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) Reset

func (*BoundingBoxMetricsEntry_ConfidenceMetricsEntry) String

type BoundingPoly

type BoundingPoly struct {

	// Output only. The bounding polygon normalized vertices.
	NormalizedVertices []*NormalizedVertex `protobuf:"bytes,2,rep,name=normalized_vertices,json=normalizedVertices,proto3" json:"normalized_vertices,omitempty"`
	// contains filtered or unexported fields
}

A bounding polygon of a detected object on a plane. On output both vertices and normalized_vertices are provided. The polygon is formed by connecting vertices in the order they are listed.
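
As a sketch, the normalized vertices can be reduced to an axis-aligned box in relative [0, 1] coordinates; this assumes NormalizedVertex exposes GetX and GetY accessors, and the helper name is illustrative:

// normalizedBounds computes the min/max extent of a polygon's normalized
// vertices (all values relative to the original image size).
func normalizedBounds(p *automlpb.BoundingPoly) (minX, minY, maxX, maxY float32) {
	vs := p.GetNormalizedVertices()
	if len(vs) == 0 {
		return 0, 0, 0, 0
	}
	minX, minY = vs[0].GetX(), vs[0].GetY()
	maxX, maxY = minX, minY
	for _, v := range vs[1:] {
		if v.GetX() < minX {
			minX = v.GetX()
		}
		if v.GetX() > maxX {
			maxX = v.GetX()
		}
		if v.GetY() < minY {
			minY = v.GetY()
		}
		if v.GetY() > maxY {
			maxY = v.GetY()
		}
	}
	return minX, minY, maxX, maxY
}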

func (*BoundingPoly) Descriptor

func (*BoundingPoly) Descriptor() ([]byte, []int)

Deprecated: Use BoundingPoly.ProtoReflect.Descriptor instead.

func (*BoundingPoly) GetNormalizedVertices

func (x *BoundingPoly) GetNormalizedVertices() []*NormalizedVertex

func (*BoundingPoly) ProtoMessage

func (*BoundingPoly) ProtoMessage()

func (*BoundingPoly) ProtoReflect

func (x *BoundingPoly) ProtoReflect() protoreflect.Message

func (*BoundingPoly) Reset

func (x *BoundingPoly) Reset()

func (*BoundingPoly) String

func (x *BoundingPoly) String() string

type CategoryStats

type CategoryStats struct {

	// The statistics of the top 20 CATEGORY values, ordered by
	//
	// [count][google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.count].
	TopCategoryStats []*CategoryStats_SingleCategoryStats `protobuf:"bytes,1,rep,name=top_category_stats,json=topCategoryStats,proto3" json:"top_category_stats,omitempty"`
	// contains filtered or unexported fields
}

The data statistics of a series of CATEGORY values.

func (*CategoryStats) Descriptor

func (*CategoryStats) Descriptor() ([]byte, []int)

Deprecated: Use CategoryStats.ProtoReflect.Descriptor instead.

func (*CategoryStats) GetTopCategoryStats

func (x *CategoryStats) GetTopCategoryStats() []*CategoryStats_SingleCategoryStats

func (*CategoryStats) ProtoMessage

func (*CategoryStats) ProtoMessage()

func (*CategoryStats) ProtoReflect

func (x *CategoryStats) ProtoReflect() protoreflect.Message

func (*CategoryStats) Reset

func (x *CategoryStats) Reset()

func (*CategoryStats) String

func (x *CategoryStats) String() string

type CategoryStats_SingleCategoryStats

type CategoryStats_SingleCategoryStats struct {

	// The CATEGORY value.
	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
	// The number of occurrences of this value in the series.
	Count int64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
	// contains filtered or unexported fields
}

The statistics of a single CATEGORY value.

func (*CategoryStats_SingleCategoryStats) Descriptor

func (*CategoryStats_SingleCategoryStats) Descriptor() ([]byte, []int)

Deprecated: Use CategoryStats_SingleCategoryStats.ProtoReflect.Descriptor instead.

func (*CategoryStats_SingleCategoryStats) GetCount

func (*CategoryStats_SingleCategoryStats) GetValue

func (*CategoryStats_SingleCategoryStats) ProtoMessage

func (*CategoryStats_SingleCategoryStats) ProtoMessage()

func (*CategoryStats_SingleCategoryStats) ProtoReflect

func (*CategoryStats_SingleCategoryStats) Reset

func (*CategoryStats_SingleCategoryStats) String

type ClassificationAnnotation

type ClassificationAnnotation struct {

	// Output only. A confidence estimate between 0.0 and 1.0. A higher value
	// means greater confidence that the annotation is positive. If a user
	// approves an annotation as negative or positive, the score value remains
	// unchanged. If a user creates an annotation, the score is 0 for negative or
	// 1 for positive.
	Score float32 `protobuf:"fixed32,1,opt,name=score,proto3" json:"score,omitempty"`
	// contains filtered or unexported fields
}

Contains annotation details specific to classification.

func (*ClassificationAnnotation) Descriptor

func (*ClassificationAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use ClassificationAnnotation.ProtoReflect.Descriptor instead.

func (*ClassificationAnnotation) GetScore

func (x *ClassificationAnnotation) GetScore() float32

func (*ClassificationAnnotation) ProtoMessage

func (*ClassificationAnnotation) ProtoMessage()

func (*ClassificationAnnotation) ProtoReflect

func (x *ClassificationAnnotation) ProtoReflect() protoreflect.Message

func (*ClassificationAnnotation) Reset

func (x *ClassificationAnnotation) Reset()

func (*ClassificationAnnotation) String

func (x *ClassificationAnnotation) String() string

type ClassificationEvaluationMetrics

type ClassificationEvaluationMetrics struct {

	// Output only. The Area Under Precision-Recall Curve metric. Micro-averaged
	// for the overall evaluation.
	AuPrc float32 `protobuf:"fixed32,1,opt,name=au_prc,json=auPrc,proto3" json:"au_prc,omitempty"`
	// Output only. The Area Under Precision-Recall Curve metric based on priors.
	// Micro-averaged for the overall evaluation.
	// Deprecated.
	//
	// Deprecated: Do not use.
	BaseAuPrc float32 `protobuf:"fixed32,2,opt,name=base_au_prc,json=baseAuPrc,proto3" json:"base_au_prc,omitempty"`
	// Output only. The Area Under Receiver Operating Characteristic curve metric.
	// Micro-averaged for the overall evaluation.
	AuRoc float32 `protobuf:"fixed32,6,opt,name=au_roc,json=auRoc,proto3" json:"au_roc,omitempty"`
	// Output only. The Log Loss metric.
	LogLoss float32 `protobuf:"fixed32,7,opt,name=log_loss,json=logLoss,proto3" json:"log_loss,omitempty"`
	// Output only. Metrics for each confidence_threshold in
	// 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and
	// position_threshold = INT32_MAX_VALUE.
	// ROC and precision-recall curves, and other aggregated metrics are derived
	// from them. The confidence metrics entries may also be supplied for
	// additional values of position_threshold, but from these no aggregated
	// metrics are computed.
	ConfidenceMetricsEntry []*ClassificationEvaluationMetrics_ConfidenceMetricsEntry `` /* 129-byte string literal not displayed */
	// Output only. Confusion matrix of the evaluation.
	// Only set for MULTICLASS classification problems where number
	// of labels is no more than 10.
	// Only set for model level evaluation, not for evaluation per label.
	ConfusionMatrix *ClassificationEvaluationMetrics_ConfusionMatrix `protobuf:"bytes,4,opt,name=confusion_matrix,json=confusionMatrix,proto3" json:"confusion_matrix,omitempty"`
	// Output only. The annotation spec ids used for this evaluation.
	AnnotationSpecId []string `protobuf:"bytes,5,rep,name=annotation_spec_id,json=annotationSpecId,proto3" json:"annotation_spec_id,omitempty"`
	// contains filtered or unexported fields
}

Model evaluation metrics for classification problems. Note: For Video Classification, these metrics only describe the quality of the Video Classification predictions of the "segment_classification" type.
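
Per the confidence_metrics_entry comment above, only the entries computed at position_threshold = INT32_MAX_VALUE feed the aggregated ROC and precision-recall curves; a sketch of filtering those out (automlpb and math are assumed imported, and the helper name is illustrative):

// curveEntries keeps only the confidence metrics entries computed at
// position_threshold = INT32_MAX_VALUE.
func curveEntries(m *automlpb.ClassificationEvaluationMetrics) []*automlpb.ClassificationEvaluationMetrics_ConfidenceMetricsEntry {
	var out []*automlpb.ClassificationEvaluationMetrics_ConfidenceMetricsEntry
	for _, e := range m.GetConfidenceMetricsEntry() {
		if e.GetPositionThreshold() == math.MaxInt32 {
			out = append(out, e)
		}
	}
	return out
}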

func (*ClassificationEvaluationMetrics) Descriptor

func (*ClassificationEvaluationMetrics) Descriptor() ([]byte, []int)

Deprecated: Use ClassificationEvaluationMetrics.ProtoReflect.Descriptor instead.

func (*ClassificationEvaluationMetrics) GetAnnotationSpecId

func (x *ClassificationEvaluationMetrics) GetAnnotationSpecId() []string

func (*ClassificationEvaluationMetrics) GetAuPrc

func (*ClassificationEvaluationMetrics) GetAuRoc

func (*ClassificationEvaluationMetrics) GetBaseAuPrc

func (x *ClassificationEvaluationMetrics) GetBaseAuPrc() float32

Deprecated: Do not use.

func (*ClassificationEvaluationMetrics) GetConfidenceMetricsEntry

func (*ClassificationEvaluationMetrics) GetConfusionMatrix

func (*ClassificationEvaluationMetrics) GetLogLoss

func (x *ClassificationEvaluationMetrics) GetLogLoss() float32

func (*ClassificationEvaluationMetrics) ProtoMessage

func (*ClassificationEvaluationMetrics) ProtoMessage()

func (*ClassificationEvaluationMetrics) ProtoReflect

func (*ClassificationEvaluationMetrics) Reset

func (*ClassificationEvaluationMetrics) String

type ClassificationEvaluationMetrics_ConfidenceMetricsEntry

type ClassificationEvaluationMetrics_ConfidenceMetricsEntry struct {

	// Output only. Metrics are computed with an assumption that the model
	// never returns predictions with score lower than this value.
	ConfidenceThreshold float32 `protobuf:"fixed32,1,opt,name=confidence_threshold,json=confidenceThreshold,proto3" json:"confidence_threshold,omitempty"`
	// Output only. Metrics are computed with an assumption that the model
	// always returns at most this many predictions (ordered by their score,
	// in descending order), but they all still need to meet the confidence_threshold.
	PositionThreshold int32 `protobuf:"varint,14,opt,name=position_threshold,json=positionThreshold,proto3" json:"position_threshold,omitempty"`
	// Output only. Recall (True Positive Rate) for the given confidence
	// threshold.
	Recall float32 `protobuf:"fixed32,2,opt,name=recall,proto3" json:"recall,omitempty"`
	// Output only. Precision for the given confidence threshold.
	Precision float32 `protobuf:"fixed32,3,opt,name=precision,proto3" json:"precision,omitempty"`
	// Output only. False Positive Rate for the given confidence threshold.
	FalsePositiveRate float32 `protobuf:"fixed32,8,opt,name=false_positive_rate,json=falsePositiveRate,proto3" json:"false_positive_rate,omitempty"`
	// Output only. The harmonic mean of recall and precision.
	F1Score float32 `protobuf:"fixed32,4,opt,name=f1_score,json=f1Score,proto3" json:"f1_score,omitempty"`
	// Output only. The Recall (True Positive Rate) when only considering the
	// label that has the highest prediction score and not below the confidence
	// threshold for each example.
	RecallAt1 float32 `protobuf:"fixed32,5,opt,name=recall_at1,json=recallAt1,proto3" json:"recall_at1,omitempty"`
	// Output only. The precision when only considering the label that has the
	// highest prediction score and not below the confidence threshold for each
	// example.
	PrecisionAt1 float32 `protobuf:"fixed32,6,opt,name=precision_at1,json=precisionAt1,proto3" json:"precision_at1,omitempty"`
	// Output only. The False Positive Rate when only considering the label that
	// has the highest prediction score and not below the confidence threshold
	// for each example.
	FalsePositiveRateAt1 float32 `` /* 127-byte string literal not displayed */
	// Output only. The harmonic mean of [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] and [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
	F1ScoreAt1 float32 `protobuf:"fixed32,7,opt,name=f1_score_at1,json=f1ScoreAt1,proto3" json:"f1_score_at1,omitempty"`
	// Output only. The number of model created labels that match a ground truth
	// label.
	TruePositiveCount int64 `protobuf:"varint,10,opt,name=true_positive_count,json=truePositiveCount,proto3" json:"true_positive_count,omitempty"`
	// Output only. The number of model created labels that do not match a
	// ground truth label.
	FalsePositiveCount int64 `protobuf:"varint,11,opt,name=false_positive_count,json=falsePositiveCount,proto3" json:"false_positive_count,omitempty"`
	// Output only. The number of ground truth labels that are not matched
	// by a model created label.
	FalseNegativeCount int64 `protobuf:"varint,12,opt,name=false_negative_count,json=falseNegativeCount,proto3" json:"false_negative_count,omitempty"`
	// Output only. The number of labels that were not created by the model,
	// but that, if they had been, would not have matched a ground truth label.
	TrueNegativeCount int64 `protobuf:"varint,13,opt,name=true_negative_count,json=trueNegativeCount,proto3" json:"true_negative_count,omitempty"`
	// contains filtered or unexported fields
}

Metrics for a single confidence threshold.

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) Descriptor

Deprecated: Use ClassificationEvaluationMetrics_ConfidenceMetricsEntry.ProtoReflect.Descriptor instead.

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetConfidenceThreshold

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetF1Score

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetF1ScoreAt1

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetFalseNegativeCount

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetFalsePositiveCount

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetFalsePositiveRate

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetFalsePositiveRateAt1

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetPositionThreshold

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetPrecision

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetPrecisionAt1

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetRecall

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetRecallAt1

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetTrueNegativeCount

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) GetTruePositiveCount

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) ProtoMessage

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) ProtoReflect

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) Reset

func (*ClassificationEvaluationMetrics_ConfidenceMetricsEntry) String

type ClassificationEvaluationMetrics_ConfusionMatrix

type ClassificationEvaluationMetrics_ConfusionMatrix struct {

	// Output only. IDs of the annotation specs used in the confusion matrix.
	// For Tables CLASSIFICATION
	//
	// [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
	// only the list of [annotation_spec_display_name-s][] is populated.
	AnnotationSpecId []string `protobuf:"bytes,1,rep,name=annotation_spec_id,json=annotationSpecId,proto3" json:"annotation_spec_id,omitempty"`
	// Output only. Display name of the annotation specs used in the confusion
	// matrix, as they were at the moment of the evaluation. For Tables
	// CLASSIFICATION
	//
	// [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type],
	// distinct values of the target column at the moment of the model
	// evaluation are populated here.
	DisplayName []string `protobuf:"bytes,3,rep,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
	// Output only. Rows in the confusion matrix. The number of rows is equal to
	// the size of `annotation_spec_id`.
	// `row[i].example_count[j]` is the number of examples that have ground
	// truth of the `annotation_spec_id[i]` and are predicted as
	// `annotation_spec_id[j]` by the model being evaluated.
	Row []*ClassificationEvaluationMetrics_ConfusionMatrix_Row `protobuf:"bytes,2,rep,name=row,proto3" json:"row,omitempty"`
	// contains filtered or unexported fields
}

Confusion matrix of the model running the classification.
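
Following the row semantics above (`row[i].example_count[j]` is the number of examples with ground truth `annotation_spec_id[i]` predicted as `annotation_spec_id[j]`), per-label precision and recall can be derived roughly as in the sketch below; the helper name is illustrative and automlpb is the assumed import alias:

// perLabelPrecisionRecall computes precision and recall for the k-th label of
// the confusion matrix (k indexes annotation_spec_id, or display_name when
// only that field is populated).
func perLabelPrecisionRecall(cm *automlpb.ClassificationEvaluationMetrics_ConfusionMatrix, k int) (precision, recall float64) {
	var truePos, predicted, actual int64
	for i, row := range cm.GetRow() {
		counts := row.GetExampleCount()
		if k < len(counts) {
			predicted += int64(counts[k]) // column k: predicted as label k
		}
		if i == k {
			for _, c := range counts {
				actual += int64(c) // row k: ground truth is label k
			}
			if k < len(counts) {
				truePos = int64(counts[k])
			}
		}
	}
	if predicted > 0 {
		precision = float64(truePos) / float64(predicted)
	}
	if actual > 0 {
		recall = float64(truePos) / float64(actual)
	}
	return precision, recall
}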

func (*ClassificationEvaluationMetrics_ConfusionMatrix) Descriptor

Deprecated: Use ClassificationEvaluationMetrics_ConfusionMatrix.ProtoReflect.Descriptor instead.

func (*ClassificationEvaluationMetrics_ConfusionMatrix) GetAnnotationSpecId

func (x *ClassificationEvaluationMetrics_ConfusionMatrix) GetAnnotationSpecId() []string

func (*ClassificationEvaluationMetrics_ConfusionMatrix) GetDisplayName

func (*ClassificationEvaluationMetrics_ConfusionMatrix) GetRow

func (*ClassificationEvaluationMetrics_ConfusionMatrix) ProtoMessage

func (*ClassificationEvaluationMetrics_ConfusionMatrix) ProtoReflect

func (*ClassificationEvaluationMetrics_ConfusionMatrix) Reset

func (*ClassificationEvaluationMetrics_ConfusionMatrix) String

type ClassificationEvaluationMetrics_ConfusionMatrix_Row

type ClassificationEvaluationMetrics_ConfusionMatrix_Row struct {

	// Output only. Value of the specific cell in the confusion matrix.
	// The number of values each row has (i.e. the length of the row) is equal
	// to the length of the `annotation_spec_id` field or, if that one is not
	// populated, length of the [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] field.
	ExampleCount []int32 `protobuf:"varint,1,rep,packed,name=example_count,json=exampleCount,proto3" json:"example_count,omitempty"`
	// contains filtered or unexported fields
}

Output only. A row in the confusion matrix.

func (*ClassificationEvaluationMetrics_ConfusionMatrix_Row) Descriptor

Deprecated: Use ClassificationEvaluationMetrics_ConfusionMatrix_Row.ProtoReflect.Descriptor instead.

func (*ClassificationEvaluationMetrics_ConfusionMatrix_Row) GetExampleCount

func (*ClassificationEvaluationMetrics_ConfusionMatrix_Row) ProtoMessage

func (*ClassificationEvaluationMetrics_ConfusionMatrix_Row) ProtoReflect

func (*ClassificationEvaluationMetrics_ConfusionMatrix_Row) Reset

func (*ClassificationEvaluationMetrics_ConfusionMatrix_Row) String

type ClassificationType

type ClassificationType int32

Type of the classification problem.

const (
	// An un-set value of this enum.
	ClassificationType_CLASSIFICATION_TYPE_UNSPECIFIED ClassificationType