dataproc

package
Version: v0.0.0-...-00ab72f
Published: Jan 18, 2022 · License: Apache-2.0 · Imports: 14 · Imported by: 26

Documentation

Index

Constants

This section is empty.

Variables

var (
	Batch_State_name = map[int32]string{
		0: "STATE_UNSPECIFIED",
		1: "PENDING",
		2: "RUNNING",
		3: "CANCELLING",
		4: "CANCELLED",
		5: "SUCCEEDED",
		6: "FAILED",
	}
	Batch_State_value = map[string]int32{
		"STATE_UNSPECIFIED": 0,
		"PENDING":           1,
		"RUNNING":           2,
		"CANCELLING":        3,
		"CANCELLED":         4,
		"SUCCEEDED":         5,
		"FAILED":            6,
	}
)

Enum value maps for Batch_State.
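
A minimal sketch of round-tripping between enum numbers and proto names with these maps, assuming the genproto import path google.golang.org/genproto/googleapis/cloud/dataproc/v1; the same pattern applies to every enum value map below:

package main

import (
	"fmt"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

func main() {
	// Number -> name via the _name map.
	fmt.Println(dataproc.Batch_State_name[int32(dataproc.Batch_RUNNING)]) // RUNNING

	// Name -> typed enum value via the _value map.
	if n, ok := dataproc.Batch_State_value["SUCCEEDED"]; ok {
		fmt.Println(dataproc.Batch_State(n) == dataproc.Batch_SUCCEEDED) // true
	}
}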

var (
	GceClusterConfig_PrivateIpv6GoogleAccess_name = map[int32]string{
		0: "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED",
		1: "INHERIT_FROM_SUBNETWORK",
		2: "OUTBOUND",
		3: "BIDIRECTIONAL",
	}
	GceClusterConfig_PrivateIpv6GoogleAccess_value = map[string]int32{
		"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED": 0,
		"INHERIT_FROM_SUBNETWORK":                1,
		"OUTBOUND":                               2,
		"BIDIRECTIONAL":                          3,
	}
)

Enum value maps for GceClusterConfig_PrivateIpv6GoogleAccess.

var (
	InstanceGroupConfig_Preemptibility_name = map[int32]string{
		0: "PREEMPTIBILITY_UNSPECIFIED",
		1: "NON_PREEMPTIBLE",
		2: "PREEMPTIBLE",
	}
	InstanceGroupConfig_Preemptibility_value = map[string]int32{
		"PREEMPTIBILITY_UNSPECIFIED": 0,
		"NON_PREEMPTIBLE":            1,
		"PREEMPTIBLE":                2,
	}
)

Enum value maps for InstanceGroupConfig_Preemptibility.

var (
	ClusterStatus_State_name = map[int32]string{
		0: "UNKNOWN",
		1: "CREATING",
		2: "RUNNING",
		3: "ERROR",
		9: "ERROR_DUE_TO_UPDATE",
		4: "DELETING",
		5: "UPDATING",
		6: "STOPPING",
		7: "STOPPED",
		8: "STARTING",
	}
	ClusterStatus_State_value = map[string]int32{
		"UNKNOWN":             0,
		"CREATING":            1,
		"RUNNING":             2,
		"ERROR":               3,
		"ERROR_DUE_TO_UPDATE": 9,
		"DELETING":            4,
		"UPDATING":            5,
		"STOPPING":            6,
		"STOPPED":             7,
		"STARTING":            8,
	}
)

Enum value maps for ClusterStatus_State.

var (
	ClusterStatus_Substate_name = map[int32]string{
		0: "UNSPECIFIED",
		1: "UNHEALTHY",
		2: "STALE_STATUS",
	}
	ClusterStatus_Substate_value = map[string]int32{
		"UNSPECIFIED":  0,
		"UNHEALTHY":    1,
		"STALE_STATUS": 2,
	}
)

Enum value maps for ClusterStatus_Substate.

var (
	ReservationAffinity_Type_name = map[int32]string{
		0: "TYPE_UNSPECIFIED",
		1: "NO_RESERVATION",
		2: "ANY_RESERVATION",
		3: "SPECIFIC_RESERVATION",
	}
	ReservationAffinity_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED":     0,
		"NO_RESERVATION":       1,
		"ANY_RESERVATION":      2,
		"SPECIFIC_RESERVATION": 3,
	}
)

Enum value maps for ReservationAffinity_Type.

var (
	LoggingConfig_Level_name = map[int32]string{
		0: "LEVEL_UNSPECIFIED",
		1: "ALL",
		2: "TRACE",
		3: "DEBUG",
		4: "INFO",
		5: "WARN",
		6: "ERROR",
		7: "FATAL",
		8: "OFF",
	}
	LoggingConfig_Level_value = map[string]int32{
		"LEVEL_UNSPECIFIED": 0,
		"ALL":               1,
		"TRACE":             2,
		"DEBUG":             3,
		"INFO":              4,
		"WARN":              5,
		"ERROR":             6,
		"FATAL":             7,
		"OFF":               8,
	}
)

Enum value maps for LoggingConfig_Level.

var (
	JobStatus_State_name = map[int32]string{
		0: "STATE_UNSPECIFIED",
		1: "PENDING",
		8: "SETUP_DONE",
		2: "RUNNING",
		3: "CANCEL_PENDING",
		7: "CANCEL_STARTED",
		4: "CANCELLED",
		5: "DONE",
		6: "ERROR",
		9: "ATTEMPT_FAILURE",
	}
	JobStatus_State_value = map[string]int32{
		"STATE_UNSPECIFIED": 0,
		"PENDING":           1,
		"SETUP_DONE":        8,
		"RUNNING":           2,
		"CANCEL_PENDING":    3,
		"CANCEL_STARTED":    7,
		"CANCELLED":         4,
		"DONE":              5,
		"ERROR":             6,
		"ATTEMPT_FAILURE":   9,
	}
)

Enum value maps for JobStatus_State.

var (
	JobStatus_Substate_name = map[int32]string{
		0: "UNSPECIFIED",
		1: "SUBMITTED",
		2: "QUEUED",
		3: "STALE_STATUS",
	}
	JobStatus_Substate_value = map[string]int32{
		"UNSPECIFIED":  0,
		"SUBMITTED":    1,
		"QUEUED":       2,
		"STALE_STATUS": 3,
	}
)

Enum value maps for JobStatus_Substate.

var (
	YarnApplication_State_name = map[int32]string{
		0: "STATE_UNSPECIFIED",
		1: "NEW",
		2: "NEW_SAVING",
		3: "SUBMITTED",
		4: "ACCEPTED",
		5: "RUNNING",
		6: "FINISHED",
		7: "FAILED",
		8: "KILLED",
	}
	YarnApplication_State_value = map[string]int32{
		"STATE_UNSPECIFIED": 0,
		"NEW":               1,
		"NEW_SAVING":        2,
		"SUBMITTED":         3,
		"ACCEPTED":          4,
		"RUNNING":           5,
		"FINISHED":          6,
		"FAILED":            7,
		"KILLED":            8,
	}
)

Enum value maps for YarnApplication_State.

var (
	ListJobsRequest_JobStateMatcher_name = map[int32]string{
		0: "ALL",
		1: "ACTIVE",
		2: "NON_ACTIVE",
	}
	ListJobsRequest_JobStateMatcher_value = map[string]int32{
		"ALL":        0,
		"ACTIVE":     1,
		"NON_ACTIVE": 2,
	}
)

Enum value maps for ListJobsRequest_JobStateMatcher.

var (
	BatchOperationMetadata_BatchOperationType_name = map[int32]string{
		0: "BATCH_OPERATION_TYPE_UNSPECIFIED",
		1: "BATCH",
	}
	BatchOperationMetadata_BatchOperationType_value = map[string]int32{
		"BATCH_OPERATION_TYPE_UNSPECIFIED": 0,
		"BATCH":                            1,
	}
)

Enum value maps for BatchOperationMetadata_BatchOperationType.

var (
	ClusterOperationStatus_State_name = map[int32]string{
		0: "UNKNOWN",
		1: "PENDING",
		2: "RUNNING",
		3: "DONE",
	}
	ClusterOperationStatus_State_value = map[string]int32{
		"UNKNOWN": 0,
		"PENDING": 1,
		"RUNNING": 2,
		"DONE":    3,
	}
)

Enum value maps for ClusterOperationStatus_State.

var (
	Component_name = map[int32]string{
		0:  "COMPONENT_UNSPECIFIED",
		5:  "ANACONDA",
		13: "DOCKER",
		9:  "DRUID",
		14: "FLINK",
		11: "HBASE",
		3:  "HIVE_WEBHCAT",
		1:  "JUPYTER",
		6:  "PRESTO",
		12: "RANGER",
		10: "SOLR",
		4:  "ZEPPELIN",
		8:  "ZOOKEEPER",
	}
	Component_value = map[string]int32{
		"COMPONENT_UNSPECIFIED": 0,
		"ANACONDA":              5,
		"DOCKER":                13,
		"DRUID":                 9,
		"FLINK":                 14,
		"HBASE":                 11,
		"HIVE_WEBHCAT":          3,
		"JUPYTER":               1,
		"PRESTO":                6,
		"RANGER":                12,
		"SOLR":                  10,
		"ZEPPELIN":              4,
		"ZOOKEEPER":             8,
	}
)

Enum value maps for Component.

var (
	FailureAction_name = map[int32]string{
		0: "FAILURE_ACTION_UNSPECIFIED",
		1: "NO_ACTION",
		2: "DELETE",
	}
	FailureAction_value = map[string]int32{
		"FAILURE_ACTION_UNSPECIFIED": 0,
		"NO_ACTION":                  1,
		"DELETE":                     2,
	}
)

Enum value maps for FailureAction.

var (
	WorkflowMetadata_State_name = map[int32]string{
		0: "UNKNOWN",
		1: "PENDING",
		2: "RUNNING",
		3: "DONE",
	}
	WorkflowMetadata_State_value = map[string]int32{
		"UNKNOWN": 0,
		"PENDING": 1,
		"RUNNING": 2,
		"DONE":    3,
	}
)

Enum value maps for WorkflowMetadata_State.

var (
	WorkflowNode_NodeState_name = map[int32]string{
		0: "NODE_STATE_UNSPECIFIED",
		1: "BLOCKED",
		2: "RUNNABLE",
		3: "RUNNING",
		4: "COMPLETED",
		5: "FAILED",
	}
	WorkflowNode_NodeState_value = map[string]int32{
		"NODE_STATE_UNSPECIFIED": 0,
		"BLOCKED":                1,
		"RUNNABLE":               2,
		"RUNNING":                3,
		"COMPLETED":              4,
		"FAILED":                 5,
	}
)

Enum value maps for WorkflowNode_NodeState.

var File_google_cloud_dataproc_v1_autoscaling_policies_proto protoreflect.FileDescriptor
var File_google_cloud_dataproc_v1_batches_proto protoreflect.FileDescriptor
var File_google_cloud_dataproc_v1_clusters_proto protoreflect.FileDescriptor
var File_google_cloud_dataproc_v1_jobs_proto protoreflect.FileDescriptor
var File_google_cloud_dataproc_v1_operations_proto protoreflect.FileDescriptor
var File_google_cloud_dataproc_v1_shared_proto protoreflect.FileDescriptor
var File_google_cloud_dataproc_v1_workflow_templates_proto protoreflect.FileDescriptor

Functions

func RegisterAutoscalingPolicyServiceServer

func RegisterAutoscalingPolicyServiceServer(s *grpc.Server, srv AutoscalingPolicyServiceServer)

func RegisterBatchControllerServer

func RegisterBatchControllerServer(s *grpc.Server, srv BatchControllerServer)

func RegisterClusterControllerServer

func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)

func RegisterJobControllerServer

func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)

func RegisterWorkflowTemplateServiceServer

func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer)
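
A minimal sketch of wiring one of these services into a grpc.Server. The policyServer type is a hypothetical stub whose handlers all return codes.Unimplemented; a real implementation would back the five AutoscalingPolicyServiceServer methods with actual logic:

package main

import (
	"context"
	"log"
	"net"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"
)

// policyServer is a stub that satisfies AutoscalingPolicyServiceServer.
type policyServer struct{}

func (policyServer) CreateAutoscalingPolicy(ctx context.Context, req *dataproc.CreateAutoscalingPolicyRequest) (*dataproc.AutoscalingPolicy, error) {
	return nil, status.Error(codes.Unimplemented, "CreateAutoscalingPolicy not implemented")
}

func (policyServer) UpdateAutoscalingPolicy(ctx context.Context, req *dataproc.UpdateAutoscalingPolicyRequest) (*dataproc.AutoscalingPolicy, error) {
	return nil, status.Error(codes.Unimplemented, "UpdateAutoscalingPolicy not implemented")
}

func (policyServer) GetAutoscalingPolicy(ctx context.Context, req *dataproc.GetAutoscalingPolicyRequest) (*dataproc.AutoscalingPolicy, error) {
	return nil, status.Error(codes.Unimplemented, "GetAutoscalingPolicy not implemented")
}

func (policyServer) ListAutoscalingPolicies(ctx context.Context, req *dataproc.ListAutoscalingPoliciesRequest) (*dataproc.ListAutoscalingPoliciesResponse, error) {
	return nil, status.Error(codes.Unimplemented, "ListAutoscalingPolicies not implemented")
}

func (policyServer) DeleteAutoscalingPolicy(ctx context.Context, req *dataproc.DeleteAutoscalingPolicyRequest) (*emptypb.Empty, error) {
	return nil, status.Error(codes.Unimplemented, "DeleteAutoscalingPolicy not implemented")
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	dataproc.RegisterAutoscalingPolicyServiceServer(s, policyServer{})
	log.Fatal(s.Serve(lis))
}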

Types

type AcceleratorConfig

type AcceleratorConfig struct {

	// Full URL, partial URI, or short name of the accelerator type resource to
	// expose to this instance. See
	// [Compute Engine
	// AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes).
	//
	// Examples:
	//
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `nvidia-tesla-k80`
	//
	// **Auto Zone Exception**: If you are using the Dataproc
	// [Auto Zone
	// Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
	// feature, you must use the short name of the accelerator type
	// resource, for example, `nvidia-tesla-k80`.
	AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri,proto3" json:"accelerator_type_uri,omitempty"`
	// The number of accelerator cards of this type exposed to this instance.
	AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
	// contains filtered or unexported fields
}

Specifies the type and number of accelerator cards attached to the instances of an instance group. See [GPUs on Compute Engine](https://cloud.google.com/compute/docs/gpus/).
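
A short sketch of attaching accelerators. It uses the short type name on the assumption that Auto Zone Placement picks the zone; the type name and count are illustrative only:

package main

import (
	"fmt"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

func main() {
	// Short name form, required when Auto Zone Placement chooses the zone;
	// with an explicit zone, a full or partial URI would also be valid.
	acc := &dataproc.AcceleratorConfig{
		AcceleratorTypeUri: "nvidia-tesla-k80",
		AcceleratorCount:   2,
	}
	fmt.Println(acc.GetAcceleratorTypeUri(), acc.GetAcceleratorCount())
}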

func (*AcceleratorConfig) Descriptor deprecated

func (*AcceleratorConfig) Descriptor() ([]byte, []int)

Deprecated: Use AcceleratorConfig.ProtoReflect.Descriptor instead.

func (*AcceleratorConfig) GetAcceleratorCount

func (x *AcceleratorConfig) GetAcceleratorCount() int32

func (*AcceleratorConfig) GetAcceleratorTypeUri

func (x *AcceleratorConfig) GetAcceleratorTypeUri() string

func (*AcceleratorConfig) ProtoMessage

func (*AcceleratorConfig) ProtoMessage()

func (*AcceleratorConfig) ProtoReflect

func (x *AcceleratorConfig) ProtoReflect() protoreflect.Message

func (*AcceleratorConfig) Reset

func (x *AcceleratorConfig) Reset()

func (*AcceleratorConfig) String

func (x *AcceleratorConfig) String() string

type AutoscalingConfig

type AutoscalingConfig struct {

	// Optional. The autoscaling policy used by the cluster.
	//
	// Only resource names that include the project ID and location (region) are valid.
	// Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
	// * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
	//
	// Note that the policy must be in the same project and Dataproc region.
	PolicyUri string `protobuf:"bytes,1,opt,name=policy_uri,json=policyUri,proto3" json:"policy_uri,omitempty"`
	// contains filtered or unexported fields
}

Autoscaling Policy config associated with the cluster.

func (*AutoscalingConfig) Descriptor deprecated

func (*AutoscalingConfig) Descriptor() ([]byte, []int)

Deprecated: Use AutoscalingConfig.ProtoReflect.Descriptor instead.

func (*AutoscalingConfig) GetPolicyUri

func (x *AutoscalingConfig) GetPolicyUri() string

func (*AutoscalingConfig) ProtoMessage

func (*AutoscalingConfig) ProtoMessage()

func (*AutoscalingConfig) ProtoReflect

func (x *AutoscalingConfig) ProtoReflect() protoreflect.Message

func (*AutoscalingConfig) Reset

func (x *AutoscalingConfig) Reset()

func (*AutoscalingConfig) String

func (x *AutoscalingConfig) String() string

type AutoscalingPolicy

type AutoscalingPolicy struct {

	// Required. The policy id.
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). Cannot begin or end with underscore
	// or hyphen. Must consist of between 3 and 50 characters.
	//
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
	// Output only. The "resource name" of the autoscaling policy, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.autoscalingPolicies`, the resource name of the
	//   policy has the following format:
	//   `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
	//
	// * For `projects.locations.autoscalingPolicies`, the resource name of the
	//   policy has the following format:
	//   `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	// Autoscaling algorithm for policy.
	//
	// Types that are assignable to Algorithm:
	//	*AutoscalingPolicy_BasicAlgorithm
	Algorithm isAutoscalingPolicy_Algorithm `protobuf_oneof:"algorithm"`
	// Required. Describes how the autoscaler will operate for primary workers.
	WorkerConfig *InstanceGroupAutoscalingPolicyConfig `protobuf:"bytes,4,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
	// Optional. Describes how the autoscaler will operate for secondary workers.
	SecondaryWorkerConfig *InstanceGroupAutoscalingPolicyConfig `` /* 126-byte string literal not displayed */
	// Optional. The labels to associate with this autoscaling policy.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC
	// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
	// associated with an autoscaling policy.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// contains filtered or unexported fields
}

Describes an autoscaling policy for the Dataproc cluster autoscaler.
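
The Algorithm oneof is populated by assigning its exported wrapper type, AutoscalingPolicy_BasicAlgorithm. A minimal sketch; the policy ID, instance bound, and durations are illustrative assumptions:

package main

import (
	"fmt"
	"time"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	policy := &dataproc.AutoscalingPolicy{
		Id: "example-policy", // hypothetical policy ID
		// Set the oneof by assigning the wrapper struct.
		Algorithm: &dataproc.AutoscalingPolicy_BasicAlgorithm{
			BasicAlgorithm: &dataproc.BasicAutoscalingAlgorithm{
				YarnConfig: &dataproc.BasicYarnAutoscalingConfig{
					GracefulDecommissionTimeout: durationpb.New(30 * time.Minute),
					ScaleUpFactor:               0.5,
					ScaleDownFactor:             0.5,
				},
				CooldownPeriod: durationpb.New(2 * time.Minute),
			},
		},
		WorkerConfig: &dataproc.InstanceGroupAutoscalingPolicyConfig{
			MaxInstances: 10, // illustrative bound
		},
	}
	// The typed accessor unwraps the oneof.
	fmt.Println(policy.GetBasicAlgorithm().GetCooldownPeriod().AsDuration())
}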

func (*AutoscalingPolicy) Descriptor deprecated

func (*AutoscalingPolicy) Descriptor() ([]byte, []int)

Deprecated: Use AutoscalingPolicy.ProtoReflect.Descriptor instead.

func (*AutoscalingPolicy) GetAlgorithm

func (m *AutoscalingPolicy) GetAlgorithm() isAutoscalingPolicy_Algorithm

func (*AutoscalingPolicy) GetBasicAlgorithm

func (x *AutoscalingPolicy) GetBasicAlgorithm() *BasicAutoscalingAlgorithm

func (*AutoscalingPolicy) GetId

func (x *AutoscalingPolicy) GetId() string

func (*AutoscalingPolicy) GetLabels

func (x *AutoscalingPolicy) GetLabels() map[string]string

func (*AutoscalingPolicy) GetName

func (x *AutoscalingPolicy) GetName() string

func (*AutoscalingPolicy) GetSecondaryWorkerConfig

func (x *AutoscalingPolicy) GetSecondaryWorkerConfig() *InstanceGroupAutoscalingPolicyConfig

func (*AutoscalingPolicy) GetWorkerConfig

func (*AutoscalingPolicy) ProtoMessage

func (*AutoscalingPolicy) ProtoMessage()

func (*AutoscalingPolicy) ProtoReflect

func (x *AutoscalingPolicy) ProtoReflect() protoreflect.Message

func (*AutoscalingPolicy) Reset

func (x *AutoscalingPolicy) Reset()

func (*AutoscalingPolicy) String

func (x *AutoscalingPolicy) String() string

type AutoscalingPolicyServiceClient

type AutoscalingPolicyServiceClient interface {
	// Creates a new autoscaling policy.
	CreateAutoscalingPolicy(ctx context.Context, in *CreateAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Updates (replaces) an autoscaling policy.
	//
	// Disabled check for update_mask, because all updates will be full
	// replacements.
	UpdateAutoscalingPolicy(ctx context.Context, in *UpdateAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Retrieves an autoscaling policy.
	GetAutoscalingPolicy(ctx context.Context, in *GetAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Lists autoscaling policies in the project.
	ListAutoscalingPolicies(ctx context.Context, in *ListAutoscalingPoliciesRequest, opts ...grpc.CallOption) (*ListAutoscalingPoliciesResponse, error)
	// Deletes an autoscaling policy. It is an error to delete an autoscaling
	// policy that is in use by one or more clusters.
	DeleteAutoscalingPolicy(ctx context.Context, in *DeleteAutoscalingPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}

AutoscalingPolicyServiceClient is the client API for AutoscalingPolicyService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
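
A minimal sketch of a call through this client. It assumes the generated NewAutoscalingPolicyServiceClient constructor and dials an insecure stand-in endpoint; production code would dial dataproc.googleapis.com:443 with OAuth credentials, and the parent name here is hypothetical:

package main

import (
	"context"
	"log"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
	"google.golang.org/grpc"
)

func main() {
	ctx := context.Background()
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure()) // stand-in endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dataproc.NewAutoscalingPolicyServiceClient(conn)
	resp, err := client.ListAutoscalingPolicies(ctx, &dataproc.ListAutoscalingPoliciesRequest{
		Parent: "projects/my-project/regions/us-central1", // hypothetical parent
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.GetPolicies() {
		log.Println(p.GetName())
	}
}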

type AutoscalingPolicyServiceServer

type AutoscalingPolicyServiceServer interface {
	// Creates a new autoscaling policy.
	CreateAutoscalingPolicy(context.Context, *CreateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Updates (replaces) an autoscaling policy.
	//
	// Disabled check for update_mask, because all updates will be full
	// replacements.
	UpdateAutoscalingPolicy(context.Context, *UpdateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Retrieves an autoscaling policy.
	GetAutoscalingPolicy(context.Context, *GetAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Lists autoscaling policies in the project.
	ListAutoscalingPolicies(context.Context, *ListAutoscalingPoliciesRequest) (*ListAutoscalingPoliciesResponse, error)
	// Deletes an autoscaling policy. It is an error to delete an autoscaling
	// policy that is in use by one or more clusters.
	DeleteAutoscalingPolicy(context.Context, *DeleteAutoscalingPolicyRequest) (*emptypb.Empty, error)
}

AutoscalingPolicyServiceServer is the server API for AutoscalingPolicyService service.

type AutoscalingPolicy_BasicAlgorithm

type AutoscalingPolicy_BasicAlgorithm struct {
	BasicAlgorithm *BasicAutoscalingAlgorithm `protobuf:"bytes,3,opt,name=basic_algorithm,json=basicAlgorithm,proto3,oneof"`
}

type BasicAutoscalingAlgorithm

type BasicAutoscalingAlgorithm struct {

	// Required. YARN autoscaling configuration.
	YarnConfig *BasicYarnAutoscalingConfig `protobuf:"bytes,1,opt,name=yarn_config,json=yarnConfig,proto3" json:"yarn_config,omitempty"`
	// Optional. Duration between scaling events. A scaling period starts after
	// the update operation from the previous event has completed.
	//
	// Bounds: [2m, 1d]. Default: 2m.
	CooldownPeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=cooldown_period,json=cooldownPeriod,proto3" json:"cooldown_period,omitempty"`
	// contains filtered or unexported fields
}

Basic algorithm for autoscaling.

func (*BasicAutoscalingAlgorithm) Descriptor deprecated

func (*BasicAutoscalingAlgorithm) Descriptor() ([]byte, []int)

Deprecated: Use BasicAutoscalingAlgorithm.ProtoReflect.Descriptor instead.

func (*BasicAutoscalingAlgorithm) GetCooldownPeriod

func (x *BasicAutoscalingAlgorithm) GetCooldownPeriod() *durationpb.Duration

func (*BasicAutoscalingAlgorithm) GetYarnConfig

func (*BasicAutoscalingAlgorithm) ProtoMessage

func (*BasicAutoscalingAlgorithm) ProtoMessage()

func (*BasicAutoscalingAlgorithm) ProtoReflect

func (*BasicAutoscalingAlgorithm) Reset

func (x *BasicAutoscalingAlgorithm) Reset()

func (*BasicAutoscalingAlgorithm) String

func (x *BasicAutoscalingAlgorithm) String() string

type BasicYarnAutoscalingConfig

type BasicYarnAutoscalingConfig struct {

	// Required. Timeout for YARN graceful decommissioning of Node Managers.
	// Specifies the duration to wait for jobs to complete before forcefully
	// removing workers (and potentially interrupting jobs). Only applicable to
	// downscaling operations.
	//
	// Bounds: [0s, 1d].
	GracefulDecommissionTimeout *durationpb.Duration `` /* 144-byte string literal not displayed */
	// Required. Fraction of average YARN pending memory in the last cooldown period
	// for which to add workers. A scale-up factor of 1.0 will result in scaling
	// up so that there is no pending memory remaining after the update (more
	// aggressive scaling). A scale-up factor closer to 0 will result in a smaller
	// magnitude of scaling up (less aggressive scaling).
	// See [How autoscaling
	// works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
	// for more information.
	//
	// Bounds: [0.0, 1.0].
	ScaleUpFactor float64 `protobuf:"fixed64,1,opt,name=scale_up_factor,json=scaleUpFactor,proto3" json:"scale_up_factor,omitempty"`
	// Required. Fraction of average YARN pending memory in the last cooldown period
	// for which to remove workers. A scale-down factor of 1 will result in
	// scaling down so that there is no available memory remaining after the
	// update (more aggressive scaling). A scale-down factor of 0 disables
	// removing workers, which can be beneficial for autoscaling a single job.
	// See [How autoscaling
	// works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
	// for more information.
	//
	// Bounds: [0.0, 1.0].
	ScaleDownFactor float64 `protobuf:"fixed64,2,opt,name=scale_down_factor,json=scaleDownFactor,proto3" json:"scale_down_factor,omitempty"`
	// Optional. Minimum scale-up threshold as a fraction of total cluster size
	// before scaling occurs. For example, in a 20-worker cluster, a threshold of
	// 0.1 means the autoscaler must recommend at least a 2-worker scale-up for
	// the cluster to scale. A threshold of 0 means the autoscaler will scale up
	// on any recommended change.
	//
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleUpMinWorkerFraction float64 `` /* 141-byte string literal not displayed */
	// Optional. Minimum scale-down threshold as a fraction of total cluster size
	// before scaling occurs. For example, in a 20-worker cluster, a threshold of
	// 0.1 means the autoscaler must recommend at least a 2 worker scale-down for
	// the cluster to scale. A threshold of 0 means the autoscaler will scale down
	// on any recommended change.
	//
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleDownMinWorkerFraction float64 `` /* 147-byte string literal not displayed */
	// contains filtered or unexported fields
}

Basic autoscaling configurations for YARN.

func (*BasicYarnAutoscalingConfig) Descriptor deprecated

func (*BasicYarnAutoscalingConfig) Descriptor() ([]byte, []int)

Deprecated: Use BasicYarnAutoscalingConfig.ProtoReflect.Descriptor instead.

func (*BasicYarnAutoscalingConfig) GetGracefulDecommissionTimeout

func (x *BasicYarnAutoscalingConfig) GetGracefulDecommissionTimeout() *durationpb.Duration

func (*BasicYarnAutoscalingConfig) GetScaleDownFactor

func (x *BasicYarnAutoscalingConfig) GetScaleDownFactor() float64

func (*BasicYarnAutoscalingConfig) GetScaleDownMinWorkerFraction

func (x *BasicYarnAutoscalingConfig) GetScaleDownMinWorkerFraction() float64

func (*BasicYarnAutoscalingConfig) GetScaleUpFactor

func (x *BasicYarnAutoscalingConfig) GetScaleUpFactor() float64

func (*BasicYarnAutoscalingConfig) GetScaleUpMinWorkerFraction

func (x *BasicYarnAutoscalingConfig) GetScaleUpMinWorkerFraction() float64

func (*BasicYarnAutoscalingConfig) ProtoMessage

func (*BasicYarnAutoscalingConfig) ProtoMessage()

func (*BasicYarnAutoscalingConfig) ProtoReflect

func (*BasicYarnAutoscalingConfig) Reset

func (x *BasicYarnAutoscalingConfig) Reset()

func (*BasicYarnAutoscalingConfig) String

func (x *BasicYarnAutoscalingConfig) String() string

type Batch

type Batch struct {

	// Output only. The resource name of the batch.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Output only. A batch UUID (Universally Unique Identifier). The service
	// generates this value when it creates the batch.
	Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"`
	// Output only. The time when the batch was created.
	CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// The application/framework-specific portion of the batch configuration.
	//
	// Types that are assignable to BatchConfig:
	//	*Batch_PysparkBatch
	//	*Batch_SparkBatch
	//	*Batch_SparkRBatch
	//	*Batch_SparkSqlBatch
	BatchConfig isBatch_BatchConfig `protobuf_oneof:"batch_config"`
	// Output only. Runtime information about batch execution.
	RuntimeInfo *RuntimeInfo `protobuf:"bytes,8,opt,name=runtime_info,json=runtimeInfo,proto3" json:"runtime_info,omitempty"`
	// Output only. The state of the batch.
	State Batch_State `protobuf:"varint,9,opt,name=state,proto3,enum=google.cloud.dataproc.v1.Batch_State" json:"state,omitempty"`
	// Output only. Batch state details, such as a failure
	// description if the state is `FAILED`.
	StateMessage string `protobuf:"bytes,10,opt,name=state_message,json=stateMessage,proto3" json:"state_message,omitempty"`
	// Output only. The time when the batch entered its current state.
	StateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=state_time,json=stateTime,proto3" json:"state_time,omitempty"`
	// Output only. The email address of the user who created the batch.
	Creator string `protobuf:"bytes,12,opt,name=creator,proto3" json:"creator,omitempty"`
	// Optional. The labels to associate with this batch.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC
	// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
	// associated with a batch.
	Labels map[string]string `` /* 154-byte string literal not displayed */
	// Optional. Runtime configuration for the batch execution.
	RuntimeConfig *RuntimeConfig `protobuf:"bytes,14,opt,name=runtime_config,json=runtimeConfig,proto3" json:"runtime_config,omitempty"`
	// Optional. Environment configuration for the batch execution.
	EnvironmentConfig *EnvironmentConfig `protobuf:"bytes,15,opt,name=environment_config,json=environmentConfig,proto3" json:"environment_config,omitempty"`
	// Output only. The resource name of the operation associated with this batch.
	Operation string `protobuf:"bytes,16,opt,name=operation,proto3" json:"operation,omitempty"`
	// Output only. Historical state information for the batch.
	StateHistory []*Batch_StateHistory `protobuf:"bytes,17,rep,name=state_history,json=stateHistory,proto3" json:"state_history,omitempty"`
	// contains filtered or unexported fields
}

A representation of a batch workload in the service.
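
Which framework a batch runs is carried in the BatchConfig oneof; because the wrapper types (Batch_PysparkBatch, Batch_SparkBatch, and so on) are exported, a type switch works from outside the package. A sketch, with a hypothetical gs:// URI:

package main

import (
	"fmt"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

// describe reports which framework-specific config a batch carries.
func describe(b *dataproc.Batch) string {
	switch cfg := b.GetBatchConfig().(type) {
	case *dataproc.Batch_PysparkBatch:
		return "PySpark: " + cfg.PysparkBatch.GetMainPythonFileUri()
	case *dataproc.Batch_SparkBatch:
		return "Spark"
	case *dataproc.Batch_SparkRBatch:
		return "SparkR"
	case *dataproc.Batch_SparkSqlBatch:
		return "Spark SQL"
	default:
		return "unknown"
	}
}

func main() {
	b := &dataproc.Batch{
		BatchConfig: &dataproc.Batch_PysparkBatch{
			PysparkBatch: &dataproc.PySparkBatch{
				MainPythonFileUri: "gs://my-bucket/job.py", // hypothetical URI
			},
		},
	}
	fmt.Println(describe(b))
}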

func (*Batch) Descriptor deprecated

func (*Batch) Descriptor() ([]byte, []int)

Deprecated: Use Batch.ProtoReflect.Descriptor instead.

func (*Batch) GetBatchConfig

func (m *Batch) GetBatchConfig() isBatch_BatchConfig

func (*Batch) GetCreateTime

func (x *Batch) GetCreateTime() *timestamppb.Timestamp

func (*Batch) GetCreator

func (x *Batch) GetCreator() string

func (*Batch) GetEnvironmentConfig

func (x *Batch) GetEnvironmentConfig() *EnvironmentConfig

func (*Batch) GetLabels

func (x *Batch) GetLabels() map[string]string

func (*Batch) GetName

func (x *Batch) GetName() string

func (*Batch) GetOperation

func (x *Batch) GetOperation() string

func (*Batch) GetPysparkBatch

func (x *Batch) GetPysparkBatch() *PySparkBatch

func (*Batch) GetRuntimeConfig

func (x *Batch) GetRuntimeConfig() *RuntimeConfig

func (*Batch) GetRuntimeInfo

func (x *Batch) GetRuntimeInfo() *RuntimeInfo

func (*Batch) GetSparkBatch

func (x *Batch) GetSparkBatch() *SparkBatch

func (*Batch) GetSparkRBatch

func (x *Batch) GetSparkRBatch() *SparkRBatch

func (*Batch) GetSparkSqlBatch

func (x *Batch) GetSparkSqlBatch() *SparkSqlBatch

func (*Batch) GetState

func (x *Batch) GetState() Batch_State

func (*Batch) GetStateHistory

func (x *Batch) GetStateHistory() []*Batch_StateHistory

func (*Batch) GetStateMessage

func (x *Batch) GetStateMessage() string

func (*Batch) GetStateTime

func (x *Batch) GetStateTime() *timestamppb.Timestamp

func (*Batch) GetUuid

func (x *Batch) GetUuid() string

func (*Batch) ProtoMessage

func (*Batch) ProtoMessage()

func (*Batch) ProtoReflect

func (x *Batch) ProtoReflect() protoreflect.Message

func (*Batch) Reset

func (x *Batch) Reset()

func (*Batch) String

func (x *Batch) String() string

type BatchControllerClient

type BatchControllerClient interface {
	// Creates a batch workload that executes asynchronously.
	CreateBatch(ctx context.Context, in *CreateBatchRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the batch workload resource representation.
	GetBatch(ctx context.Context, in *GetBatchRequest, opts ...grpc.CallOption) (*Batch, error)
	// Lists batch workloads.
	ListBatches(ctx context.Context, in *ListBatchesRequest, opts ...grpc.CallOption) (*ListBatchesResponse, error)
	// Deletes the batch workload resource. If the batch is not in a terminal state,
	// the delete fails and the response returns `FAILED_PRECONDITION`.
	DeleteBatch(ctx context.Context, in *DeleteBatchRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}

BatchControllerClient is the client API for BatchController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
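
A minimal sketch of submitting a PySpark batch through this client. The endpoint, parent, and file URI are illustrative assumptions; CreateBatch is asynchronous and returns a long-running operation to poll:

package main

import (
	"context"
	"log"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
	"google.golang.org/grpc"
)

func main() {
	ctx := context.Background()
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure()) // stand-in endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dataproc.NewBatchControllerClient(conn)
	op, err := client.CreateBatch(ctx, &dataproc.CreateBatchRequest{
		Parent: "projects/my-project/locations/us-central1", // hypothetical parent
		Batch: &dataproc.Batch{
			BatchConfig: &dataproc.Batch_PysparkBatch{
				PysparkBatch: &dataproc.PySparkBatch{
					MainPythonFileUri: "gs://my-bucket/job.py", // hypothetical URI
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Poll or wait on the returned long-running operation for the final Batch.
	log.Println("operation:", op.GetName())
}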

type BatchControllerServer

type BatchControllerServer interface {
	// Creates a batch workload that executes asynchronously.
	CreateBatch(context.Context, *CreateBatchRequest) (*longrunning.Operation, error)
	// Gets the batch workload resource representation.
	GetBatch(context.Context, *GetBatchRequest) (*Batch, error)
	// Lists batch workloads.
	ListBatches(context.Context, *ListBatchesRequest) (*ListBatchesResponse, error)
	// Deletes the batch workload resource. If the batch is not in a terminal state,
	// the delete fails and the response returns `FAILED_PRECONDITION`.
	DeleteBatch(context.Context, *DeleteBatchRequest) (*emptypb.Empty, error)
}

BatchControllerServer is the server API for BatchController service.

type BatchOperationMetadata

type BatchOperationMetadata struct {

	// Name of the batch for the operation.
	Batch string `protobuf:"bytes,1,opt,name=batch,proto3" json:"batch,omitempty"`
	// Batch UUID for the operation.
	BatchUuid string `protobuf:"bytes,2,opt,name=batch_uuid,json=batchUuid,proto3" json:"batch_uuid,omitempty"`
	// The time when the operation was created.
	CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// The time when the operation finished.
	DoneTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=done_time,json=doneTime,proto3" json:"done_time,omitempty"`
	// The operation type.
	OperationType BatchOperationMetadata_BatchOperationType `` /* 173-byte string literal not displayed */
	// Short description of the operation.
	Description string `protobuf:"bytes,7,opt,name=description,proto3" json:"description,omitempty"`
	// Labels associated with the operation.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// Warnings encountered during operation execution.
	Warnings []string `protobuf:"bytes,9,rep,name=warnings,proto3" json:"warnings,omitempty"`
	// contains filtered or unexported fields
}

Metadata describing the Batch operation.

func (*BatchOperationMetadata) Descriptor deprecated

func (*BatchOperationMetadata) Descriptor() ([]byte, []int)

Deprecated: Use BatchOperationMetadata.ProtoReflect.Descriptor instead.

func (*BatchOperationMetadata) GetBatch

func (x *BatchOperationMetadata) GetBatch() string

func (*BatchOperationMetadata) GetBatchUuid

func (x *BatchOperationMetadata) GetBatchUuid() string

func (*BatchOperationMetadata) GetCreateTime

func (x *BatchOperationMetadata) GetCreateTime() *timestamppb.Timestamp

func (*BatchOperationMetadata) GetDescription

func (x *BatchOperationMetadata) GetDescription() string

func (*BatchOperationMetadata) GetDoneTime

func (x *BatchOperationMetadata) GetDoneTime() *timestamppb.Timestamp

func (*BatchOperationMetadata) GetLabels

func (x *BatchOperationMetadata) GetLabels() map[string]string

func (*BatchOperationMetadata) GetOperationType

func (*BatchOperationMetadata) GetWarnings

func (x *BatchOperationMetadata) GetWarnings() []string

func (*BatchOperationMetadata) ProtoMessage

func (*BatchOperationMetadata) ProtoMessage()

func (*BatchOperationMetadata) ProtoReflect

func (x *BatchOperationMetadata) ProtoReflect() protoreflect.Message

func (*BatchOperationMetadata) Reset

func (x *BatchOperationMetadata) Reset()

func (*BatchOperationMetadata) String

func (x *BatchOperationMetadata) String() string

type BatchOperationMetadata_BatchOperationType

type BatchOperationMetadata_BatchOperationType int32

Operation type for Batch resources.

const (
	// Batch operation type is unknown.
	BatchOperationMetadata_BATCH_OPERATION_TYPE_UNSPECIFIED BatchOperationMetadata_BatchOperationType = 0
	// Batch operation type.
	BatchOperationMetadata_BATCH BatchOperationMetadata_BatchOperationType = 1
)

func (BatchOperationMetadata_BatchOperationType) Descriptor

func (BatchOperationMetadata_BatchOperationType) Enum

func (BatchOperationMetadata_BatchOperationType) EnumDescriptor deprecated

func (BatchOperationMetadata_BatchOperationType) EnumDescriptor() ([]byte, []int)

Deprecated: Use BatchOperationMetadata_BatchOperationType.Descriptor instead.

func (BatchOperationMetadata_BatchOperationType) Number

func (BatchOperationMetadata_BatchOperationType) String

func (BatchOperationMetadata_BatchOperationType) Type

type Batch_PysparkBatch

type Batch_PysparkBatch struct {
	// Optional. PySpark batch config.
	PysparkBatch *PySparkBatch `protobuf:"bytes,4,opt,name=pyspark_batch,json=pysparkBatch,proto3,oneof"`
}

type Batch_SparkBatch

type Batch_SparkBatch struct {
	// Optional. Spark batch config.
	SparkBatch *SparkBatch `protobuf:"bytes,5,opt,name=spark_batch,json=sparkBatch,proto3,oneof"`
}

type Batch_SparkRBatch

type Batch_SparkRBatch struct {
	// Optional. SparkR batch config.
	SparkRBatch *SparkRBatch `protobuf:"bytes,6,opt,name=spark_r_batch,json=sparkRBatch,proto3,oneof"`
}

type Batch_SparkSqlBatch

type Batch_SparkSqlBatch struct {
	// Optional. SparkSql batch config.
	SparkSqlBatch *SparkSqlBatch `protobuf:"bytes,7,opt,name=spark_sql_batch,json=sparkSqlBatch,proto3,oneof"`
}

type Batch_State

type Batch_State int32

The batch state.

const (
	// The batch state is unknown.
	Batch_STATE_UNSPECIFIED Batch_State = 0
	// The batch is created before running.
	Batch_PENDING Batch_State = 1
	// The batch is running.
	Batch_RUNNING Batch_State = 2
	// The batch is cancelling.
	Batch_CANCELLING Batch_State = 3
	// The batch cancellation was successful.
	Batch_CANCELLED Batch_State = 4
	// The batch completed successfully.
	Batch_SUCCEEDED Batch_State = 5
	// The batch is no longer running due to an error.
	Batch_FAILED Batch_State = 6
)
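
A small helper built on these constants. Treating CANCELLED, SUCCEEDED, and FAILED as terminal is a reading of the per-state comments above, not an API guarantee:

package main

import (
	"fmt"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

// isTerminal reports whether a batch has stopped making progress.
func isTerminal(s dataproc.Batch_State) bool {
	switch s {
	case dataproc.Batch_CANCELLED, dataproc.Batch_SUCCEEDED, dataproc.Batch_FAILED:
		return true
	}
	return false
}

func main() {
	fmt.Println(isTerminal(dataproc.Batch_RUNNING))   // false
	fmt.Println(isTerminal(dataproc.Batch_SUCCEEDED)) // true
}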

func (Batch_State) Descriptor

func (Batch_State) Enum

func (x Batch_State) Enum() *Batch_State

func (Batch_State) EnumDescriptor deprecated

func (Batch_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use Batch_State.Descriptor instead.

func (Batch_State) Number

func (x Batch_State) Number() protoreflect.EnumNumber

func (Batch_State) String

func (x Batch_State) String() string

func (Batch_State) Type

type Batch_StateHistory

type Batch_StateHistory struct {

	// Output only. The state of the batch at this point in history.
	State Batch_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.Batch_State" json:"state,omitempty"`
	// Output only. Details about the state at this point in history.
	StateMessage string `protobuf:"bytes,2,opt,name=state_message,json=stateMessage,proto3" json:"state_message,omitempty"`
	// Output only. The time when the batch entered the historical state.
	StateStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// contains filtered or unexported fields
}

Historical state information.

func (*Batch_StateHistory) Descriptor deprecated

func (*Batch_StateHistory) Descriptor() ([]byte, []int)

Deprecated: Use Batch_StateHistory.ProtoReflect.Descriptor instead.

func (*Batch_StateHistory) GetState

func (x *Batch_StateHistory) GetState() Batch_State

func (*Batch_StateHistory) GetStateMessage

func (x *Batch_StateHistory) GetStateMessage() string

func (*Batch_StateHistory) GetStateStartTime

func (x *Batch_StateHistory) GetStateStartTime() *timestamppb.Timestamp

func (*Batch_StateHistory) ProtoMessage

func (*Batch_StateHistory) ProtoMessage()

func (*Batch_StateHistory) ProtoReflect

func (x *Batch_StateHistory) ProtoReflect() protoreflect.Message

func (*Batch_StateHistory) Reset

func (x *Batch_StateHistory) Reset()

func (*Batch_StateHistory) String

func (x *Batch_StateHistory) String() string

type CancelJobRequest

type CancelJobRequest struct {

	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// contains filtered or unexported fields
}

A request to cancel a job.

func (*CancelJobRequest) Descriptor deprecated

func (*CancelJobRequest) Descriptor() ([]byte, []int)

Deprecated: Use CancelJobRequest.ProtoReflect.Descriptor instead.

func (*CancelJobRequest) GetJobId

func (x *CancelJobRequest) GetJobId() string

func (*CancelJobRequest) GetProjectId

func (x *CancelJobRequest) GetProjectId() string

func (*CancelJobRequest) GetRegion

func (x *CancelJobRequest) GetRegion() string

func (*CancelJobRequest) ProtoMessage

func (*CancelJobRequest) ProtoMessage()

func (*CancelJobRequest) ProtoReflect

func (x *CancelJobRequest) ProtoReflect() protoreflect.Message

func (*CancelJobRequest) Reset

func (x *CancelJobRequest) Reset()

func (*CancelJobRequest) String

func (x *CancelJobRequest) String() string

type Cluster

type Cluster struct {

	// Required. The Google Cloud Platform project ID that the cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The cluster name. Cluster names within a project must be
	// unique. Names of deleted clusters can be reused.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. The cluster config for a cluster of Compute Engine Instances.
	// Note that Dataproc may set default values, and values may change
	// when clusters are updated.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC
	// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
	// associated with a cluster.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// Output only. Cluster status.
	Status *ClusterStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous cluster status.
	StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. A cluster UUID (Universally Unique Identifier). Dataproc
	// generates this value when it creates the cluster.
	ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
	//
	// **Beta Feature**: This report is available for testing purposes only. It
	// may be changed before final release.
	Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics,proto3" json:"metrics,omitempty"`
	// contains filtered or unexported fields
}

Describes the identifying information, config, and status of a Dataproc cluster.

func (*Cluster) Descriptor deprecated

func (*Cluster) Descriptor() ([]byte, []int)

Deprecated: Use Cluster.ProtoReflect.Descriptor instead.

func (*Cluster) GetClusterName

func (x *Cluster) GetClusterName() string

func (*Cluster) GetClusterUuid

func (x *Cluster) GetClusterUuid() string

func (*Cluster) GetConfig

func (x *Cluster) GetConfig() *ClusterConfig

func (*Cluster) GetLabels

func (x *Cluster) GetLabels() map[string]string

func (*Cluster) GetMetrics

func (x *Cluster) GetMetrics() *ClusterMetrics

func (*Cluster) GetProjectId

func (x *Cluster) GetProjectId() string

func (*Cluster) GetStatus

func (x *Cluster) GetStatus() *ClusterStatus

func (*Cluster) GetStatusHistory

func (x *Cluster) GetStatusHistory() []*ClusterStatus

func (*Cluster) ProtoMessage

func (*Cluster) ProtoMessage()

func (*Cluster) ProtoReflect

func (x *Cluster) ProtoReflect() protoreflect.Message

func (*Cluster) Reset

func (x *Cluster) Reset()

func (*Cluster) String

func (x *Cluster) String() string

type ClusterConfig

type ClusterConfig struct {

	// Optional. A Cloud Storage bucket used to stage job
	// dependencies, config files, and job driver console output.
	// If you do not specify a staging bucket, Cloud
	// Dataproc will determine a Cloud Storage location (US,
	// ASIA, or EU) for your cluster's staging bucket according to the
	// Compute Engine zone where your cluster is deployed, and then create
	// and manage this project-level, per-location bucket (see
	// [Dataproc staging and temp
	// buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
	// **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
	// a Cloud Storage bucket.**
	ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket,proto3" json:"config_bucket,omitempty"`
	// Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data,
	// such as Spark and MapReduce history files.
	// If you do not specify a temp bucket,
	// Dataproc will determine a Cloud Storage location (US,
	// ASIA, or EU) for your cluster's temp bucket according to the
	// Compute Engine zone where your cluster is deployed, and then create
	// and manage this project-level, per-location bucket. The default bucket has
	// a TTL of 90 days, but you can use any TTL (or none) if you specify a
	// bucket (see
	// [Dataproc staging and temp
	// buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
	// **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
	// a Cloud Storage bucket.**
	TempBucket string `protobuf:"bytes,2,opt,name=temp_bucket,json=tempBucket,proto3" json:"temp_bucket,omitempty"`
	// Optional. The shared Compute Engine config settings for
	// all instances in a cluster.
	GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig,proto3" json:"gce_cluster_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// the cluster's master instance.
	MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig,proto3" json:"master_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// the cluster's worker instances.
	WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// the cluster's secondary worker instances.
	SecondaryWorkerConfig *InstanceGroupConfig `` /* 127-byte string literal not displayed */
	// Optional. The config settings for cluster software.
	SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig,proto3" json:"software_config,omitempty"`
	// Optional. Commands to execute on each node after config is
	// completed. By default, executables are run on master and all worker nodes.
	// You can test a node's `role` metadata to run an executable on
	// a master or worker node, as shown below using `curl` (you can also use
	// `wget`):
	//
	//     ROLE=$(curl -H Metadata-Flavor:Google
	//     http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
	//     if [[ "${ROLE}" == 'Master' ]]; then
	//       ... master specific actions ...
	//     else
	//       ... worker specific actions ...
	//     fi
	InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions,proto3" json:"initialization_actions,omitempty"`
	// Optional. Encryption settings for the cluster.
	EncryptionConfig *EncryptionConfig `protobuf:"bytes,15,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
	// Optional. Autoscaling config for the policy associated with the cluster.
	// Cluster does not autoscale if this field is unset.
	AutoscalingConfig *AutoscalingConfig `protobuf:"bytes,18,opt,name=autoscaling_config,json=autoscalingConfig,proto3" json:"autoscaling_config,omitempty"`
	// Optional. Security settings for the cluster.
	SecurityConfig *SecurityConfig `protobuf:"bytes,16,opt,name=security_config,json=securityConfig,proto3" json:"security_config,omitempty"`
	// Optional. Lifecycle setting for the cluster.
	LifecycleConfig *LifecycleConfig `protobuf:"bytes,17,opt,name=lifecycle_config,json=lifecycleConfig,proto3" json:"lifecycle_config,omitempty"`
	// Optional. Port/endpoint configuration for this cluster.
	EndpointConfig *EndpointConfig `protobuf:"bytes,19,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"`
	// Optional. Metastore configuration.
	MetastoreConfig *MetastoreConfig `protobuf:"bytes,20,opt,name=metastore_config,json=metastoreConfig,proto3" json:"metastore_config,omitempty"`
	// Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to
	// Kubernetes. Setting this is considered mutually exclusive with Compute
	// Engine-based options such as `gce_cluster_config`, `master_config`,
	// `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
	GkeClusterConfig *GkeClusterConfig `protobuf:"bytes,21,opt,name=gke_cluster_config,json=gkeClusterConfig,proto3" json:"gke_cluster_config,omitempty"`
	// contains filtered or unexported fields
}

The cluster config.
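
A minimal sketch of a small cluster config. The bucket, zone, machine types, worker counts, and init-script URI are illustrative assumptions:

package main

import (
	"fmt"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

func main() {
	cfg := &dataproc.ClusterConfig{
		// Bucket name only; a gs:// URI is rejected for this field.
		ConfigBucket: "my-staging-bucket", // hypothetical bucket
		GceClusterConfig: &dataproc.GceClusterConfig{
			ZoneUri: "us-central1-a",
		},
		MasterConfig: &dataproc.InstanceGroupConfig{
			NumInstances:   1,
			MachineTypeUri: "n1-standard-4",
		},
		WorkerConfig: &dataproc.InstanceGroupConfig{
			NumInstances:   2,
			MachineTypeUri: "n1-standard-4",
		},
		InitializationActions: []*dataproc.NodeInitializationAction{{
			ExecutableFile: "gs://my-bucket/init.sh", // hypothetical startup script
		}},
	}
	fmt.Println(cfg.GetMasterConfig().GetNumInstances())
}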

func (*ClusterConfig) Descriptor deprecated

func (*ClusterConfig) Descriptor() ([]byte, []int)

Deprecated: Use ClusterConfig.ProtoReflect.Descriptor instead.

func (*ClusterConfig) GetAutoscalingConfig

func (x *ClusterConfig) GetAutoscalingConfig() *AutoscalingConfig

func (*ClusterConfig) GetConfigBucket

func (x *ClusterConfig) GetConfigBucket() string

func (*ClusterConfig) GetEncryptionConfig

func (x *ClusterConfig) GetEncryptionConfig() *EncryptionConfig

func (*ClusterConfig) GetEndpointConfig

func (x *ClusterConfig) GetEndpointConfig() *EndpointConfig

func (*ClusterConfig) GetGceClusterConfig

func (x *ClusterConfig) GetGceClusterConfig() *GceClusterConfig

func (*ClusterConfig) GetGkeClusterConfig

func (x *ClusterConfig) GetGkeClusterConfig() *GkeClusterConfig

func (*ClusterConfig) GetInitializationActions

func (x *ClusterConfig) GetInitializationActions() []*NodeInitializationAction

func (*ClusterConfig) GetLifecycleConfig

func (x *ClusterConfig) GetLifecycleConfig() *LifecycleConfig

func (*ClusterConfig) GetMasterConfig

func (x *ClusterConfig) GetMasterConfig() *InstanceGroupConfig

func (*ClusterConfig) GetMetastoreConfig

func (x *ClusterConfig) GetMetastoreConfig() *MetastoreConfig

func (*ClusterConfig) GetSecondaryWorkerConfig

func (x *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) GetSecurityConfig

func (x *ClusterConfig) GetSecurityConfig() *SecurityConfig

func (*ClusterConfig) GetSoftwareConfig

func (x *ClusterConfig) GetSoftwareConfig() *SoftwareConfig

func (*ClusterConfig) GetTempBucket

func (x *ClusterConfig) GetTempBucket() string

func (*ClusterConfig) GetWorkerConfig

func (x *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) ProtoMessage

func (*ClusterConfig) ProtoMessage()

func (*ClusterConfig) ProtoReflect

func (x *ClusterConfig) ProtoReflect() protoreflect.Message

func (*ClusterConfig) Reset

func (x *ClusterConfig) Reset()

func (*ClusterConfig) String

func (x *ClusterConfig) String() string

type ClusterControllerClient

type ClusterControllerClient interface {
	// Creates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Updates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	// The cluster must be in a [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error
	// is returned.
	UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Stops a cluster in a project.
	StopCluster(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Starts a cluster in a project.
	StartCluster(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Deletes a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project alphabetically.
	ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
	// Gets cluster diagnostic information. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	// After the operation completes,
	// [Operation.response][google.longrunning.Operation.response]
	// contains
	// [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
	DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}

ClusterControllerClient is the client API for ClusterController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
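
A minimal sketch of creating a cluster through this client. The project, region, and cluster name are illustrative, the endpoint is an insecure stand-in, and the returned operation carries ClusterOperationMetadata as described above:

package main

import (
	"context"
	"log"

	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
	"google.golang.org/grpc"
)

func main() {
	ctx := context.Background()
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure()) // stand-in endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dataproc.NewClusterControllerClient(conn)
	op, err := client.CreateCluster(ctx, &dataproc.CreateClusterRequest{
		ProjectId: "my-project", // hypothetical project
		Region:    "us-central1",
		Cluster: &dataproc.Cluster{
			ProjectId:   "my-project",
			ClusterName: "example-cluster", // hypothetical name
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Poll the long-running operation until Done to get the final Cluster.
	log.Println("operation:", op.GetName())
}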

type ClusterControllerServer

type ClusterControllerServer interface {
	// Creates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error)
	// Updates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	// The cluster must be in a [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error
	// is returned.
	UpdateCluster(context.Context, *UpdateClusterRequest) (*longrunning.Operation, error)
	// Stops a cluster in a project.
	StopCluster(context.Context, *StopClusterRequest) (*longrunning.Operation, error)
	// Starts a cluster in a project.
	StartCluster(context.Context, *StartClusterRequest) (*longrunning.Operation, error)
	// Deletes a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	DeleteCluster(context.Context, *DeleteClusterRequest) (*longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project alphabetically.
	ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
	// Gets cluster diagnostic information. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
	// After the operation completes,
	// [Operation.response][google.longrunning.Operation.response]
	// contains
	// [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
	DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*longrunning.Operation, error)
}

ClusterControllerServer is the server API for ClusterController service.

type ClusterMetrics

type ClusterMetrics struct {

	// The HDFS metrics.
	HdfsMetrics map[string]int64 `` /* 183-byte string literal not displayed */
	// The YARN metrics.
	YarnMetrics map[string]int64 `` /* 183-byte string literal not displayed */
	// contains filtered or unexported fields
}

Contains cluster daemon metrics, such as HDFS and YARN stats.

**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.

func (*ClusterMetrics) Descriptor deprecated

func (*ClusterMetrics) Descriptor() ([]byte, []int)

Deprecated: Use ClusterMetrics.ProtoReflect.Descriptor instead.

func (*ClusterMetrics) GetHdfsMetrics

func (x *ClusterMetrics) GetHdfsMetrics() map[string]int64

func (*ClusterMetrics) GetYarnMetrics

func (x *ClusterMetrics) GetYarnMetrics() map[string]int64

func (*ClusterMetrics) ProtoMessage

func (*ClusterMetrics) ProtoMessage()

func (*ClusterMetrics) ProtoReflect

func (x *ClusterMetrics) ProtoReflect() protoreflect.Message

func (*ClusterMetrics) Reset

func (x *ClusterMetrics) Reset()

func (*ClusterMetrics) String

func (x *ClusterMetrics) String() string

type ClusterOperation

type ClusterOperation struct {

	// Output only. The id of the cluster operation.
	OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
	// Output only. Error, if operation failed.
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. Indicates the operation is done.
	Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
	// contains filtered or unexported fields
}

The cluster operation triggered by a workflow.

func (*ClusterOperation) Descriptor deprecated

func (*ClusterOperation) Descriptor() ([]byte, []int)

Deprecated: Use ClusterOperation.ProtoReflect.Descriptor instead.

func (*ClusterOperation) GetDone

func (x *ClusterOperation) GetDone() bool

func (*ClusterOperation) GetError

func (x *ClusterOperation) GetError() string

func (*ClusterOperation) GetOperationId

func (x *ClusterOperation) GetOperationId() string

func (*ClusterOperation) ProtoMessage

func (*ClusterOperation) ProtoMessage()

func (*ClusterOperation) ProtoReflect

func (x *ClusterOperation) ProtoReflect() protoreflect.Message

func (*ClusterOperation) Reset

func (x *ClusterOperation) Reset()

func (*ClusterOperation) String

func (x *ClusterOperation) String() string

type ClusterOperationMetadata

type ClusterOperationMetadata struct {

	// Output only. Name of the cluster for the operation.
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Output only. Cluster UUID for the operation.
	ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Output only. Current operation status.
	Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous operation statuses.
	StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. The operation type.
	OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType,proto3" json:"operation_type,omitempty"`
	// Output only. Short description of the operation.
	Description string `protobuf:"bytes,12,opt,name=description,proto3" json:"description,omitempty"`
	// Output only. Labels associated with the operation.
	Labels map[string]string `` /* 154-byte string literal not displayed */
	// Output only. Warnings encountered during operation execution.
	Warnings []string `protobuf:"bytes,14,rep,name=warnings,proto3" json:"warnings,omitempty"`
	// contains filtered or unexported fields
}

Metadata describing the operation.
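
The cluster RPCs above return this message packed into Operation.metadata as an Any. A sketch of unpacking it, assuming the genproto longrunning package:

import (
	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

// clusterOpMetadata unpacks the ClusterOperationMetadata carried by a
// cluster operation, returning an error if the Any holds another type.
func clusterOpMetadata(op *longrunningpb.Operation) (*dataproc.ClusterOperationMetadata, error) {
	md := &dataproc.ClusterOperationMetadata{}
	if err := op.GetMetadata().UnmarshalTo(md); err != nil {
		return nil, err
	}
	return md, nil
}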

func (*ClusterOperationMetadata) Descriptor deprecated

func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)

Deprecated: Use ClusterOperationMetadata.ProtoReflect.Descriptor instead.

func (*ClusterOperationMetadata) GetClusterName

func (x *ClusterOperationMetadata) GetClusterName() string

func (*ClusterOperationMetadata) GetClusterUuid

func (x *ClusterOperationMetadata) GetClusterUuid() string

func (*ClusterOperationMetadata) GetDescription

func (x *ClusterOperationMetadata) GetDescription() string

func (*ClusterOperationMetadata) GetLabels

func (x *ClusterOperationMetadata) GetLabels() map[string]string

func (*ClusterOperationMetadata) GetOperationType

func (x *ClusterOperationMetadata) GetOperationType() string

func (*ClusterOperationMetadata) GetStatus

func (*ClusterOperationMetadata) GetStatusHistory

func (x *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus

func (*ClusterOperationMetadata) GetWarnings

func (x *ClusterOperationMetadata) GetWarnings() []string

func (*ClusterOperationMetadata) ProtoMessage

func (*ClusterOperationMetadata) ProtoMessage()

func (*ClusterOperationMetadata) ProtoReflect

func (x *ClusterOperationMetadata) ProtoReflect() protoreflect.Message

func (*ClusterOperationMetadata) Reset

func (x *ClusterOperationMetadata) Reset()

func (*ClusterOperationMetadata) String

func (x *ClusterOperationMetadata) String() string

type ClusterOperationStatus

type ClusterOperationStatus struct {

	// Output only. A message containing the operation state.
	State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.ClusterOperationStatus_State" json:"state,omitempty"`
	// Output only. A message containing the detailed operation state.
	InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState,proto3" json:"inner_state,omitempty"`
	// Output only. A message containing any operation metadata details.
	Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"`
	// Output only. The time this state was entered.
	StateStartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// contains filtered or unexported fields
}

The status of the operation.

func (*ClusterOperationStatus) Descriptor deprecated

func (*ClusterOperationStatus) Descriptor() ([]byte, []int)

Deprecated: Use ClusterOperationStatus.ProtoReflect.Descriptor instead.

func (*ClusterOperationStatus) GetDetails

func (x *ClusterOperationStatus) GetDetails() string

func (*ClusterOperationStatus) GetInnerState

func (x *ClusterOperationStatus) GetInnerState() string

func (*ClusterOperationStatus) GetState

func (*ClusterOperationStatus) GetStateStartTime

func (x *ClusterOperationStatus) GetStateStartTime() *timestamppb.Timestamp

func (*ClusterOperationStatus) ProtoMessage

func (*ClusterOperationStatus) ProtoMessage()

func (*ClusterOperationStatus) ProtoReflect

func (x *ClusterOperationStatus) ProtoReflect() protoreflect.Message

func (*ClusterOperationStatus) Reset

func (x *ClusterOperationStatus) Reset()

func (*ClusterOperationStatus) String

func (x *ClusterOperationStatus) String() string

type ClusterOperationStatus_State

type ClusterOperationStatus_State int32

The operation state.

const (
	// Unused.
	ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0
	// The operation has been created.
	ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1
	// The operation is running.
	ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2
	// The operation is done; either cancelled or completed.
	ClusterOperationStatus_DONE ClusterOperationStatus_State = 3
)
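
A small helper over this enum; note that, per the comment on DONE, a cancelled operation also ends in DONE, so callers that need to distinguish cancellation must inspect the operation's error details separately:

import dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"

// opFinished reports whether a cluster operation has reached its terminal
// state. DONE is the only terminal value in this enum.
func opFinished(s *dataproc.ClusterOperationStatus) bool {
	return s.GetState() == dataproc.ClusterOperationStatus_DONE
}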

func (ClusterOperationStatus_State) Descriptor

func (ClusterOperationStatus_State) Enum

func (ClusterOperationStatus_State) EnumDescriptor deprecated

func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use ClusterOperationStatus_State.Descriptor instead.

func (ClusterOperationStatus_State) Number

func (ClusterOperationStatus_State) String

func (ClusterOperationStatus_State) Type

type ClusterSelector

type ClusterSelector struct {

	// Optional. The zone where the workflow process executes. This parameter does not
	// affect the selection of the cluster.
	//
	// If unspecified, the zone of the first cluster matching the selector
	// is used.
	Zone string `protobuf:"bytes,1,opt,name=zone,proto3" json:"zone,omitempty"`
	// Required. The cluster labels. A cluster must have all of the specified
	// labels to match.
	ClusterLabels map[string]string `` /* 188-byte string literal not displayed */
	// contains filtered or unexported fields
}

A selector that chooses a target cluster for jobs based on metadata.
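
A sketch of a selector for workflow placement; the label keys and values are hypothetical:

import dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"

// stagingSelector targets any cluster carrying both labels. Zone is left
// empty, so the zone of the first matching cluster is used, as the field
// comment above describes.
func stagingSelector() *dataproc.ClusterSelector {
	return &dataproc.ClusterSelector{
		ClusterLabels: map[string]string{
			"env":  "staging",
			"team": "analytics",
		},
	}
}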

func (*ClusterSelector) Descriptor deprecated

func (*ClusterSelector) Descriptor() ([]byte, []int)

Deprecated: Use ClusterSelector.ProtoReflect.Descriptor instead.

func (*ClusterSelector) GetClusterLabels

func (x *ClusterSelector) GetClusterLabels() map[string]string

func (*ClusterSelector) GetZone

func (x *ClusterSelector) GetZone() string

func (*ClusterSelector) ProtoMessage

func (*ClusterSelector) ProtoMessage()

func (*ClusterSelector) ProtoReflect

func (x *ClusterSelector) ProtoReflect() protoreflect.Message

func (*ClusterSelector) Reset

func (x *ClusterSelector) Reset()

func (*ClusterSelector) String

func (x *ClusterSelector) String() string

type ClusterStatus

type ClusterStatus struct {

	// Output only. The cluster's state.
	State ClusterStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.ClusterStatus_State" json:"state,omitempty"`
	// Optional. Output only. Details of the cluster's state.
	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
	// Output only. Time when this state was entered (see JSON representation of
	// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	StateStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information that includes
	// status reported by the agent.
	Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,proto3,enum=google.cloud.dataproc.v1.ClusterStatus_Substate" json:"substate,omitempty"`
	// contains filtered or unexported fields
}

The status of a cluster and its instances.

func (*ClusterStatus) Descriptor deprecated

func (*ClusterStatus) Descriptor() ([]byte, []int)

Deprecated: Use ClusterStatus.ProtoReflect.Descriptor instead.

func (*ClusterStatus) GetDetail

func (x *ClusterStatus) GetDetail() string

func (*ClusterStatus) GetState

func (x *ClusterStatus) GetState() ClusterStatus_State

func (*ClusterStatus) GetStateStartTime

func (x *ClusterStatus) GetStateStartTime() *timestamppb.Timestamp

func (*ClusterStatus) GetSubstate

func (x *ClusterStatus) GetSubstate() ClusterStatus_Substate

func (*ClusterStatus) ProtoMessage

func (*ClusterStatus) ProtoMessage()

func (*ClusterStatus) ProtoReflect

func (x *ClusterStatus) ProtoReflect() protoreflect.Message

func (*ClusterStatus) Reset

func (x *ClusterStatus) Reset()

func (*ClusterStatus) String

func (x *ClusterStatus) String() string

type ClusterStatus_State

type ClusterStatus_State int32

The cluster state.

const (
	// The cluster state is unknown.
	ClusterStatus_UNKNOWN ClusterStatus_State = 0
	// The cluster is being created and set up. It is not ready for use.
	ClusterStatus_CREATING ClusterStatus_State = 1
	// The cluster is currently running and healthy. It is ready for use.
	ClusterStatus_RUNNING ClusterStatus_State = 2
	// The cluster encountered an error. It is not ready for use.
	ClusterStatus_ERROR ClusterStatus_State = 3
	// The cluster has encountered an error while being updated. Jobs can
	// be submitted to the cluster, but the cluster cannot be updated.
	ClusterStatus_ERROR_DUE_TO_UPDATE ClusterStatus_State = 9
	// The cluster is being deleted. It cannot be used.
	ClusterStatus_DELETING ClusterStatus_State = 4
	// The cluster is being updated. It continues to accept and process jobs.
	ClusterStatus_UPDATING ClusterStatus_State = 5
	// The cluster is being stopped. It cannot be used.
	ClusterStatus_STOPPING ClusterStatus_State = 6
	// The cluster is currently stopped. It is not ready for use.
	ClusterStatus_STOPPED ClusterStatus_State = 7
	// The cluster is being started. It is not ready for use.
	ClusterStatus_STARTING ClusterStatus_State = 8
)
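
The state comments above imply a simple job-submission gate; a sketch:

import dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"

// acceptsJobs reports whether jobs can be submitted to the cluster, per the
// state comments above: RUNNING and UPDATING clusters process jobs, and
// ERROR_DUE_TO_UPDATE still accepts submissions even though the cluster can
// no longer be updated.
func acceptsJobs(st *dataproc.ClusterStatus) bool {
	switch st.GetState() {
	case dataproc.ClusterStatus_RUNNING,
		dataproc.ClusterStatus_UPDATING,
		dataproc.ClusterStatus_ERROR_DUE_TO_UPDATE:
		return true
	default:
		return false
	}
}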

func (ClusterStatus_State) Descriptor

func (ClusterStatus_State) Enum

func (ClusterStatus_State) EnumDescriptor deprecated

func (ClusterStatus_State) EnumDescriptor() ([]byte, []int)

Deprecated: Use ClusterStatus_State.Descriptor instead.

func (ClusterStatus_State) Number

func (ClusterStatus_State) String

func (x ClusterStatus_State) String() string

func (ClusterStatus_State) Type

type ClusterStatus_Substate

type ClusterStatus_Substate int32

The cluster substate.

const (
	// The cluster substate is unknown.
	ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
	// The cluster is known to be in an unhealthy state
	// (for example, critical daemons are not running or HDFS capacity is
	// exhausted).
	//
	// Applies to RUNNING state.
	ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
	// The agent-reported status is out of date (this may occur if
	// Dataproc loses communication with the agent).
	//
	// Applies to RUNNING state.
	ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)

func (ClusterStatus_Substate) Descriptor

func (ClusterStatus_Substate) Enum

func (ClusterStatus_Substate) EnumDescriptor deprecated

func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int)

Deprecated: Use ClusterStatus_Substate.Descriptor instead.

func (ClusterStatus_Substate) Number

func (ClusterStatus_Substate) String

func (x ClusterStatus_Substate) String() string

func (ClusterStatus_Substate) Type

type Component

type Component int32

Cluster components that can be activated.

const (
	// Unspecified component. Specifying this will cause Cluster creation to fail.
	Component_COMPONENT_UNSPECIFIED Component = 0
	// The Anaconda python distribution. The Anaconda component is not supported
	// in the Dataproc 2.0 image
	// (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-release-2.0);
	// the 2.0 image is pre-installed with Miniconda.
	Component_ANACONDA Component = 5
	// Docker
	Component_DOCKER Component = 13
	// The Druid query engine. (alpha)
	Component_DRUID Component = 9
	// Flink
	Component_FLINK Component = 14
	// HBase. (beta)
	Component_HBASE Component = 11
	// The Hive Web HCatalog (the REST service for accessing HCatalog).
	Component_HIVE_WEBHCAT Component = 3
	// The Jupyter Notebook.
	Component_JUPYTER Component = 1
	// The Presto query engine.
	Component_PRESTO Component = 6
	// The Ranger service.
	Component_RANGER Component = 12
	// The Solr service.
	Component_SOLR Component = 10
	// The Zeppelin notebook.
	Component_ZEPPELIN Component = 4
	// The Zookeeper service.
	Component_ZOOKEEPER Component = 8
)
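
Components are activated at cluster creation time. A sketch, assuming the SoftwareConfig message's OptionalComponents field defined elsewhere in this package:

import dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"

// withNotebooks activates two optional notebook components on a new cluster.
func withNotebooks() *dataproc.SoftwareConfig {
	return &dataproc.SoftwareConfig{
		OptionalComponents: []dataproc.Component{
			dataproc.Component_JUPYTER,
			dataproc.Component_ZEPPELIN,
		},
	}
}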

func (Component) Descriptor

func (Component) Descriptor() protoreflect.EnumDescriptor

func (Component) Enum

func (x Component) Enum() *Component

func (Component) EnumDescriptor deprecated

func (Component) EnumDescriptor() ([]byte, []int)

Deprecated: Use Component.Descriptor instead.

func (Component) Number

func (x Component) Number() protoreflect.EnumNumber

func (Component) String

func (x Component) String() string

func (Component) Type

type ConfidentialInstanceConfig

type ConfidentialInstanceConfig struct {

	// Optional. Defines whether the instance should have confidential compute enabled.
	EnableConfidentialCompute bool `` /* 139-byte string literal not displayed */
	// contains filtered or unexported fields
}

Confidential Instance Config for clusters using [Confidential VMs](https://cloud.google.com/compute/confidential-vm/docs).
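
A sketch of opting a cluster's VMs into confidential compute, assuming the GceClusterConfig message's ConfidentialInstanceConfig field defined elsewhere in this package:

import dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"

// confidentialGCE enables confidential compute for a cluster's instances.
func confidentialGCE() *dataproc.GceClusterConfig {
	return &dataproc.GceClusterConfig{
		ConfidentialInstanceConfig: &dataproc.ConfidentialInstanceConfig{
			EnableConfidentialCompute: true, // requires a machine type that supports Confidential VMs
		},
	}
}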

func (*ConfidentialInstanceConfig) Descriptor deprecated

func (*ConfidentialInstanceConfig) Descriptor() ([]byte, []int)

Deprecated: Use ConfidentialInstanceConfig.ProtoReflect.Descriptor instead.

func (*ConfidentialInstanceConfig) GetEnableConfidentialCompute

func (x *ConfidentialInstanceConfig) GetEnableConfidentialCompute() bool

func (*ConfidentialInstanceConfig) ProtoMessage

func (*ConfidentialInstanceConfig) ProtoMessage()

func (*ConfidentialInstanceConfig) ProtoReflect

func (*ConfidentialInstanceConfig) Reset

func (x *ConfidentialInstanceConfig) Reset()

func (*ConfidentialInstanceConfig) String

func (x *ConfidentialInstanceConfig) String() string

type CreateAutoscalingPolicyRequest

type CreateAutoscalingPolicyRequest struct {

	// Required. The "resource name" of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.autoscalingPolicies.create`, the resource name
	//   of the region has the following format:
	//   `projects/{project_id}/regions/{region}`
	//
	// * For `projects.locations.autoscalingPolicies.create`, the resource name
	//   of the location has the following format:
	//   `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The autoscaling policy to create.
	Policy *AutoscalingPolicy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
	// contains filtered or unexported fields
}

A request to create an autoscaling policy.
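
Both parent forms are plain resource names; illustrative helpers for building them (the same two shapes apply to the workflow-template create request below):

import "fmt"

// regionalParent builds the parent name used by projects.regions.* creates.
func regionalParent(projectID, region string) string {
	return fmt.Sprintf("projects/%s/regions/%s", projectID, region)
}

// locationParent builds the parent name used by projects.locations.* creates.
func locationParent(projectID, location string) string {
	return fmt.Sprintf("projects/%s/locations/%s", projectID, location)
}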

func (*CreateAutoscalingPolicyRequest) Descriptor deprecated

func (*CreateAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.

func (*CreateAutoscalingPolicyRequest) GetParent

func (x *CreateAutoscalingPolicyRequest) GetParent() string

func (*CreateAutoscalingPolicyRequest) GetPolicy

func (*CreateAutoscalingPolicyRequest) ProtoMessage

func (*CreateAutoscalingPolicyRequest) ProtoMessage()

func (*CreateAutoscalingPolicyRequest) ProtoReflect

func (*CreateAutoscalingPolicyRequest) Reset

func (x *CreateAutoscalingPolicyRequest) Reset()

func (*CreateAutoscalingPolicyRequest) String

type CreateBatchRequest

type CreateBatchRequest struct {

	// Required. The parent resource where this batch will be created.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The batch to create.
	Batch *Batch `protobuf:"bytes,2,opt,name=batch,proto3" json:"batch,omitempty"`
	// Optional. The ID to use for the batch, which will become the final component of
	// the batch's resource name.
	//
	// This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
	BatchId string `protobuf:"bytes,3,opt,name=batch_id,json=batchId,proto3" json:"batch_id,omitempty"`
	// Optional. A unique ID used to identify the request. If the service
	// receives two
	// [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
	// with the same request_id, the second request is ignored and the
	// Operation that corresponds to the first Batch created and stored
	// in the backend is returned.
	//
	// Recommendation: Set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The value must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to create a batch workload.
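
A sketch of an idempotent create: retrying with the same RequestId returns the Operation for the first Batch instead of creating a duplicate. The github.com/google/uuid dependency and all names here are assumptions; any RFC 4122 generator works:

import (
	"github.com/google/uuid"
	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

// newBatchRequest builds a retry-safe CreateBatchRequest; the workload
// configuration of the Batch itself is elided.
func newBatchRequest(parent string, b *dataproc.Batch) *dataproc.CreateBatchRequest {
	return &dataproc.CreateBatchRequest{
		Parent:    parent,             // e.g. "projects/my-project/locations/us-central1" (hypothetical)
		Batch:     b,
		BatchId:   "nightly-etl-0142", // 4-63 characters from /[a-z][0-9]-/
		RequestId: uuid.NewString(),   // 36 chars, within the 40-character limit
	}
}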

func (*CreateBatchRequest) Descriptor deprecated

func (*CreateBatchRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateBatchRequest.ProtoReflect.Descriptor instead.

func (*CreateBatchRequest) GetBatch

func (x *CreateBatchRequest) GetBatch() *Batch

func (*CreateBatchRequest) GetBatchId

func (x *CreateBatchRequest) GetBatchId() string

func (*CreateBatchRequest) GetParent

func (x *CreateBatchRequest) GetParent() string

func (*CreateBatchRequest) GetRequestId

func (x *CreateBatchRequest) GetRequestId() string

func (*CreateBatchRequest) ProtoMessage

func (*CreateBatchRequest) ProtoMessage()

func (*CreateBatchRequest) ProtoReflect

func (x *CreateBatchRequest) ProtoReflect() protoreflect.Message

func (*CreateBatchRequest) Reset

func (x *CreateBatchRequest) Reset()

func (*CreateBatchRequest) String

func (x *CreateBatchRequest) String() string

type CreateClusterRequest

type CreateClusterRequest struct {

	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster to create.
	Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// Optional. A unique ID used to identify the request. If the server receives two
	// [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
	// with the same ID, the second request is ignored, and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
	// is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// Optional. Failure action when primary worker creation fails.
	ActionOnFailedPrimaryWorkers FailureAction `` /* 196-byte string literal not displayed */
	// contains filtered or unexported fields
}

A request to create a cluster.
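
A sketch of a minimal create request; field values are hypothetical, the github.com/google/uuid dependency is an assumption, and the Cluster message's ProjectId and ClusterName fields are defined elsewhere in this package:

import (
	"github.com/google/uuid"
	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

// newCreateCluster builds a retry-safe CreateClusterRequest; the cluster's
// config (machine types, disks, software) is elided.
func newCreateCluster(project, region, name string) *dataproc.CreateClusterRequest {
	return &dataproc.CreateClusterRequest{
		ProjectId: project,
		Region:    region,
		RequestId: uuid.NewString(), // makes retries of this call idempotent
		Cluster: &dataproc.Cluster{
			ProjectId:   project,
			ClusterName: name,
		},
	}
}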

func (*CreateClusterRequest) Descriptor deprecated

func (*CreateClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateClusterRequest.ProtoReflect.Descriptor instead.

func (*CreateClusterRequest) GetActionOnFailedPrimaryWorkers

func (x *CreateClusterRequest) GetActionOnFailedPrimaryWorkers() FailureAction

func (*CreateClusterRequest) GetCluster

func (x *CreateClusterRequest) GetCluster() *Cluster

func (*CreateClusterRequest) GetProjectId

func (x *CreateClusterRequest) GetProjectId() string

func (*CreateClusterRequest) GetRegion

func (x *CreateClusterRequest) GetRegion() string

func (*CreateClusterRequest) GetRequestId

func (x *CreateClusterRequest) GetRequestId() string

func (*CreateClusterRequest) ProtoMessage

func (*CreateClusterRequest) ProtoMessage()

func (*CreateClusterRequest) ProtoReflect

func (x *CreateClusterRequest) ProtoReflect() protoreflect.Message

func (*CreateClusterRequest) Reset

func (x *CreateClusterRequest) Reset()

func (*CreateClusterRequest) String

func (x *CreateClusterRequest) String() string

type CreateWorkflowTemplateRequest

type CreateWorkflowTemplateRequest struct {

	// Required. The resource name of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates.create`, the resource name of the
	//   region has the following format:
	//   `projects/{project_id}/regions/{region}`
	//
	// * For `projects.locations.workflowTemplates.create`, the resource name of
	//   the location has the following format:
	//   `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The Dataproc workflow template to create.
	Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
	// contains filtered or unexported fields
}

A request to create a workflow template.

func (*CreateWorkflowTemplateRequest) Descriptor deprecated

func (*CreateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

Deprecated: Use CreateWorkflowTemplateRequest.ProtoReflect.Descriptor instead.

func (*CreateWorkflowTemplateRequest) GetParent

func (x *CreateWorkflowTemplateRequest) GetParent() string

func (*CreateWorkflowTemplateRequest) GetTemplate

func (*CreateWorkflowTemplateRequest) ProtoMessage

func (*CreateWorkflowTemplateRequest) ProtoMessage()

func (*CreateWorkflowTemplateRequest) ProtoReflect

func (*CreateWorkflowTemplateRequest) Reset

func (x *CreateWorkflowTemplateRequest) Reset()

func (*CreateWorkflowTemplateRequest) String

type DeleteAutoscalingPolicyRequest

type DeleteAutoscalingPolicyRequest struct {

	// Required. The "resource name" of the autoscaling policy, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.autoscalingPolicies.delete`, the resource name
	//   of the policy has the following format:
	//   `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
	//
	// * For `projects.locations.autoscalingPolicies.delete`, the resource name
	//   of the policy has the following format:
	//   `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

A request to delete an autoscaling policy.

Autoscaling policies in use by one or more clusters will not be deleted.

func (*DeleteAutoscalingPolicyRequest) Descriptor deprecated

func (*DeleteAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

Deprecated: Use DeleteAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.

func (*DeleteAutoscalingPolicyRequest) GetName

func (*DeleteAutoscalingPolicyRequest) ProtoMessage

func (*DeleteAutoscalingPolicyRequest) ProtoMessage()

func (*DeleteAutoscalingPolicyRequest) ProtoReflect

func (*DeleteAutoscalingPolicyRequest) Reset

func (x *DeleteAutoscalingPolicyRequest) Reset()

func (*DeleteAutoscalingPolicyRequest) String

type DeleteBatchRequest

type DeleteBatchRequest struct {

	// Required. The name of the batch resource to delete.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// contains filtered or unexported fields
}

A request to delete a batch workload.

func (*DeleteBatchRequest) Descriptor deprecated

func (*DeleteBatchRequest) Descriptor() ([]byte, []int)

Deprecated: Use DeleteBatchRequest.ProtoReflect.Descriptor instead.

func (*DeleteBatchRequest) GetName

func (x *DeleteBatchRequest) GetName() string

func (*DeleteBatchRequest) ProtoMessage

func (*DeleteBatchRequest) ProtoMessage()

func (*DeleteBatchRequest) ProtoReflect

func (x *DeleteBatchRequest) ProtoReflect() protoreflect.Message

func (*DeleteBatchRequest) Reset

func (x *DeleteBatchRequest) Reset()

func (*DeleteBatchRequest) String

func (x *DeleteBatchRequest) String() string

type DeleteClusterRequest

type DeleteClusterRequest struct {

	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifying the `cluster_uuid` means the RPC should fail
	// (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
	ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Optional. A unique ID used to identify the request. If the server
	// receives two
	// [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
	// with the same ID, the second request is ignored, and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
	// backend is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// contains filtered or unexported fields
}

A request to delete a cluster.
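
A sketch of a UUID-pinned delete: if the named cluster was deleted and recreated in the meantime (and so carries a new UUID), the RPC fails with NOT_FOUND instead of removing the newer cluster. Names and the uuid dependency are assumptions:

import (
	"github.com/google/uuid"
	dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

// newDeleteCluster builds a conditional, retry-safe DeleteClusterRequest.
func newDeleteCluster(project, region, name, knownUUID string) *dataproc.DeleteClusterRequest {
	return &dataproc.DeleteClusterRequest{
		ProjectId:   project,
		Region:      region,
		ClusterName: name,
		ClusterUuid: knownUUID, // e.g. captured from an earlier GetCluster response
		RequestId:   uuid.NewString(),
	}
}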

func (*DeleteClusterRequest) Descriptor deprecated

func (*DeleteClusterRequest) Descriptor() ([]byte, []int)

Deprecated: Use DeleteClusterRequest.ProtoReflect.Descriptor instead.

func (*DeleteClusterRequest) GetClusterName

func (x *DeleteClusterRequest) GetClusterName() string

func (*DeleteClusterRequest) GetClusterUuid

func (x *DeleteClusterRequest) GetClusterUuid() string

func (*DeleteClusterRequest) GetProjectId

func (x *DeleteClusterRequest) GetProjectId() string

func (*DeleteClusterRequest) GetRegion

func (x *DeleteClusterRequest) GetRegion() string

func (*DeleteClusterRequest) GetRequestId