dataproc

package
v0.0.0-...-0e82294 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Sep 25, 2018 License: Apache-2.0 Imports: 11 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

View Source
var ClusterOperationStatus_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "PENDING",
	2: "RUNNING",
	3: "DONE",
}
View Source
var ClusterOperationStatus_State_value = map[string]int32{
	"UNKNOWN": 0,
	"PENDING": 1,
	"RUNNING": 2,
	"DONE":    3,
}
View Source
var ClusterStatus_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "CREATING",
	2: "RUNNING",
	3: "ERROR",
	4: "DELETING",
	5: "UPDATING",
}
View Source
var ClusterStatus_State_value = map[string]int32{
	"UNKNOWN":  0,
	"CREATING": 1,
	"RUNNING":  2,
	"ERROR":    3,
	"DELETING": 4,
	"UPDATING": 5,
}
View Source
var ClusterStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "UNHEALTHY",
	2: "STALE_STATUS",
}
View Source
var ClusterStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"UNHEALTHY":    1,
	"STALE_STATUS": 2,
}
View Source
var JobStatus_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "PENDING",
	8: "SETUP_DONE",
	2: "RUNNING",
	3: "CANCEL_PENDING",
	7: "CANCEL_STARTED",
	4: "CANCELLED",
	5: "DONE",
	6: "ERROR",
	9: "ATTEMPT_FAILURE",
}
View Source
var JobStatus_State_value = map[string]int32{
	"STATE_UNSPECIFIED": 0,
	"PENDING":           1,
	"SETUP_DONE":        8,
	"RUNNING":           2,
	"CANCEL_PENDING":    3,
	"CANCEL_STARTED":    7,
	"CANCELLED":         4,
	"DONE":              5,
	"ERROR":             6,
	"ATTEMPT_FAILURE":   9,
}
View Source
var JobStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "SUBMITTED",
	2: "QUEUED",
	3: "STALE_STATUS",
}
View Source
var JobStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"SUBMITTED":    1,
	"QUEUED":       2,
	"STALE_STATUS": 3,
}
View Source
var ListJobsRequest_JobStateMatcher_name = map[int32]string{
	0: "ALL",
	1: "ACTIVE",
	2: "NON_ACTIVE",
}
View Source
var ListJobsRequest_JobStateMatcher_value = map[string]int32{
	"ALL":        0,
	"ACTIVE":     1,
	"NON_ACTIVE": 2,
}
View Source
var LoggingConfig_Level_name = map[int32]string{
	0: "LEVEL_UNSPECIFIED",
	1: "ALL",
	2: "TRACE",
	3: "DEBUG",
	4: "INFO",
	5: "WARN",
	6: "ERROR",
	7: "FATAL",
	8: "OFF",
}
View Source
var LoggingConfig_Level_value = map[string]int32{
	"LEVEL_UNSPECIFIED": 0,
	"ALL":               1,
	"TRACE":             2,
	"DEBUG":             3,
	"INFO":              4,
	"WARN":              5,
	"ERROR":             6,
	"FATAL":             7,
	"OFF":               8,
}
View Source
var WorkflowMetadata_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "PENDING",
	2: "RUNNING",
	3: "DONE",
}
View Source
var WorkflowMetadata_State_value = map[string]int32{
	"UNKNOWN": 0,
	"PENDING": 1,
	"RUNNING": 2,
	"DONE":    3,
}
View Source
var WorkflowNode_NodeState_name = map[int32]string{
	0: "NODE_STATUS_UNSPECIFIED",
	1: "BLOCKED",
	2: "RUNNABLE",
	3: "RUNNING",
	4: "COMPLETED",
	5: "FAILED",
}
View Source
var WorkflowNode_NodeState_value = map[string]int32{
	"NODE_STATUS_UNSPECIFIED": 0,
	"BLOCKED":                 1,
	"RUNNABLE":                2,
	"RUNNING":                 3,
	"COMPLETED":               4,
	"FAILED":                  5,
}
View Source
var YarnApplication_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "NEW",
	2: "NEW_SAVING",
	3: "SUBMITTED",
	4: "ACCEPTED",
	5: "RUNNING",
	6: "FINISHED",
	7: "FAILED",
	8: "KILLED",
}
View Source
var YarnApplication_State_value = map[string]int32{
	"STATE_UNSPECIFIED": 0,
	"NEW":               1,
	"NEW_SAVING":        2,
	"SUBMITTED":         3,
	"ACCEPTED":          4,
	"RUNNING":           5,
	"FINISHED":          6,
	"FAILED":            7,
	"KILLED":            8,
}

Functions

func RegisterClusterControllerServer

func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)

func RegisterJobControllerServer

func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)

func RegisterWorkflowTemplateServiceServer

func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer)

Types

type AcceleratorConfig

type AcceleratorConfig struct {
	// Full URL, partial URI, or short name of the accelerator type resource to
	// expose to this instance. See [Compute Engine AcceleratorTypes](
	// /compute/docs/reference/beta/acceleratorTypes)
	//
	// Examples
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `nvidia-tesla-k80`
	//
	// **Auto Zone Exception**: If you are using the Cloud Dataproc
	// [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
	// feature, you must use the short name of the accelerator type
	// resource, for example, `nvidia-tesla-k80`.
	AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri,proto3" json:"accelerator_type_uri,omitempty"`
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount     int32    `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Specifies the type and number of accelerator cards attached to the instances of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).

func (*AcceleratorConfig) Descriptor

func (*AcceleratorConfig) Descriptor() ([]byte, []int)

func (*AcceleratorConfig) GetAcceleratorCount

func (m *AcceleratorConfig) GetAcceleratorCount() int32

func (*AcceleratorConfig) GetAcceleratorTypeUri

func (m *AcceleratorConfig) GetAcceleratorTypeUri() string

func (*AcceleratorConfig) ProtoMessage

func (*AcceleratorConfig) ProtoMessage()

func (*AcceleratorConfig) Reset

func (m *AcceleratorConfig) Reset()

func (*AcceleratorConfig) String

func (m *AcceleratorConfig) String() string

func (*AcceleratorConfig) XXX_DiscardUnknown

func (m *AcceleratorConfig) XXX_DiscardUnknown()

func (*AcceleratorConfig) XXX_Marshal

func (m *AcceleratorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AcceleratorConfig) XXX_Merge

func (m *AcceleratorConfig) XXX_Merge(src proto.Message)

func (*AcceleratorConfig) XXX_Size

func (m *AcceleratorConfig) XXX_Size() int

func (*AcceleratorConfig) XXX_Unmarshal

func (m *AcceleratorConfig) XXX_Unmarshal(b []byte) error

type CancelJobRequest

type CancelJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId                string   `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to cancel a job.

func (*CancelJobRequest) Descriptor

func (*CancelJobRequest) Descriptor() ([]byte, []int)

func (*CancelJobRequest) GetJobId

func (m *CancelJobRequest) GetJobId() string

func (*CancelJobRequest) GetProjectId

func (m *CancelJobRequest) GetProjectId() string

func (*CancelJobRequest) GetRegion

func (m *CancelJobRequest) GetRegion() string

func (*CancelJobRequest) ProtoMessage

func (*CancelJobRequest) ProtoMessage()

func (*CancelJobRequest) Reset

func (m *CancelJobRequest) Reset()

func (*CancelJobRequest) String

func (m *CancelJobRequest) String() string

func (*CancelJobRequest) XXX_DiscardUnknown

func (m *CancelJobRequest) XXX_DiscardUnknown()

func (*CancelJobRequest) XXX_Marshal

func (m *CancelJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CancelJobRequest) XXX_Merge

func (m *CancelJobRequest) XXX_Merge(src proto.Message)

func (*CancelJobRequest) XXX_Size

func (m *CancelJobRequest) XXX_Size() int

func (*CancelJobRequest) XXX_Unmarshal

func (m *CancelJobRequest) XXX_Unmarshal(b []byte) error

type Cluster

type Cluster struct {
	// Required. The Google Cloud Platform project ID that the cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The cluster name. Cluster names within a project must be
	// unique. Names of deleted clusters can be reused.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The cluster config. Note that Cloud Dataproc may set
	// default values, and values may change when clusters are updated.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// No more than 32 labels can be associated with a cluster.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// Output only. Cluster status.
	Status *ClusterStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous cluster status.
	StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
	// generates this value when it creates the cluster.
	ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Contains cluster daemon metrics such as HDFS and YARN stats.
	//
	// **Beta Feature**: This report is available for testing purposes only. It may
	// be changed before final release.
	Metrics              *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics,proto3" json:"metrics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

Describes the identifying information, config, and status of a cluster of Compute Engine instances.

func (*Cluster) Descriptor

func (*Cluster) Descriptor() ([]byte, []int)

func (*Cluster) GetClusterName

func (m *Cluster) GetClusterName() string

func (*Cluster) GetClusterUuid

func (m *Cluster) GetClusterUuid() string

func (*Cluster) GetConfig

func (m *Cluster) GetConfig() *ClusterConfig

func (*Cluster) GetLabels

func (m *Cluster) GetLabels() map[string]string

func (*Cluster) GetMetrics

func (m *Cluster) GetMetrics() *ClusterMetrics

func (*Cluster) GetProjectId

func (m *Cluster) GetProjectId() string

func (*Cluster) GetStatus

func (m *Cluster) GetStatus() *ClusterStatus

func (*Cluster) GetStatusHistory

func (m *Cluster) GetStatusHistory() []*ClusterStatus

func (*Cluster) ProtoMessage

func (*Cluster) ProtoMessage()

func (*Cluster) Reset

func (m *Cluster) Reset()

func (*Cluster) String

func (m *Cluster) String() string

func (*Cluster) XXX_DiscardUnknown

func (m *Cluster) XXX_DiscardUnknown()

func (*Cluster) XXX_Marshal

func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Cluster) XXX_Merge

func (m *Cluster) XXX_Merge(src proto.Message)

func (*Cluster) XXX_Size

func (m *Cluster) XXX_Size() int

func (*Cluster) XXX_Unmarshal

func (m *Cluster) XXX_Unmarshal(b []byte) error

type ClusterConfig

type ClusterConfig struct {
	// Optional. A Cloud Storage staging bucket used for sharing generated
	// SSH keys and config. If you do not specify a staging bucket, Cloud
	// Dataproc will determine an appropriate Cloud Storage location (US,
	// ASIA, or EU) for your cluster's staging bucket according to the Google
	// Compute Engine zone where your cluster is deployed, and then it will create
	// and manage this project-level, per-location bucket for you.
	ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket,proto3" json:"config_bucket,omitempty"`
	// Required. The shared Compute Engine config settings for
	// all instances in a cluster.
	GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig,proto3" json:"gce_cluster_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// the master instance in a cluster.
	MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig,proto3" json:"master_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// worker instances in a cluster.
	WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// additional worker instances in a cluster.
	SecondaryWorkerConfig *InstanceGroupConfig `` /* 127-byte string literal not displayed */
	// Optional. The config settings for software inside the cluster.
	SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig,proto3" json:"software_config,omitempty"`
	// Optional. The config setting for auto delete cluster schedule.
	LifecycleConfig *LifecycleConfig `protobuf:"bytes,14,opt,name=lifecycle_config,json=lifecycleConfig,proto3" json:"lifecycle_config,omitempty"`
	// Optional. Commands to execute on each node after config is
	// completed. By default, executables are run on master and all worker nodes.
	// You can test a node's <code>role</code> metadata to run an executable on
	// a master or worker node, as shown below using `curl` (you can also use `wget`):
	//
	//     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
	//     if [[ "${ROLE}" == 'Master' ]]; then
	//       ... master specific actions ...
	//     else
	//       ... worker specific actions ...
	//     fi
	InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions,proto3" json:"initialization_actions,omitempty"`
	XXX_NoUnkeyedLiteral  struct{}                    `json:"-"`
	XXX_unrecognized      []byte                      `json:"-"`
	XXX_sizecache         int32                       `json:"-"`
}

The cluster config.

func (*ClusterConfig) Descriptor

func (*ClusterConfig) Descriptor() ([]byte, []int)

func (*ClusterConfig) GetConfigBucket

func (m *ClusterConfig) GetConfigBucket() string

func (*ClusterConfig) GetGceClusterConfig

func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig

func (*ClusterConfig) GetInitializationActions

func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction

func (*ClusterConfig) GetLifecycleConfig

func (m *ClusterConfig) GetLifecycleConfig() *LifecycleConfig

func (*ClusterConfig) GetMasterConfig

func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig

func (*ClusterConfig) GetSecondaryWorkerConfig

func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) GetSoftwareConfig

func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig

func (*ClusterConfig) GetWorkerConfig

func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) ProtoMessage

func (*ClusterConfig) ProtoMessage()

func (*ClusterConfig) Reset

func (m *ClusterConfig) Reset()

func (*ClusterConfig) String

func (m *ClusterConfig) String() string

func (*ClusterConfig) XXX_DiscardUnknown

func (m *ClusterConfig) XXX_DiscardUnknown()

func (*ClusterConfig) XXX_Marshal

func (m *ClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterConfig) XXX_Merge

func (m *ClusterConfig) XXX_Merge(src proto.Message)

func (*ClusterConfig) XXX_Size

func (m *ClusterConfig) XXX_Size() int

func (*ClusterConfig) XXX_Unmarshal

func (m *ClusterConfig) XXX_Unmarshal(b []byte) error

type ClusterControllerClient

type ClusterControllerClient interface {
	// Creates a cluster in a project.
	CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Updates a cluster in a project.
	UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Deletes a cluster in a project.
	DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project.
	ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
	// Gets cluster diagnostic information.
	// After the operation completes, the Operation.response field
	// contains `DiagnoseClusterOutputLocation`.
	DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}

ClusterControllerClient is the client API for ClusterController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewClusterControllerClient

func NewClusterControllerClient(cc *grpc.ClientConn) ClusterControllerClient

type ClusterControllerServer

type ClusterControllerServer interface {
	// Creates a cluster in a project.
	CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error)
	// Updates a cluster in a project.
	UpdateCluster(context.Context, *UpdateClusterRequest) (*longrunning.Operation, error)
	// Deletes a cluster in a project.
	DeleteCluster(context.Context, *DeleteClusterRequest) (*longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project.
	ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
	// Gets cluster diagnostic information.
	// After the operation completes, the Operation.response field
	// contains `DiagnoseClusterOutputLocation`.
	DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*longrunning.Operation, error)
}

ClusterControllerServer is the server API for ClusterController service.

type ClusterMetrics

type ClusterMetrics struct {
	// The HDFS metrics.
	HdfsMetrics map[string]int64 `` /* 183-byte string literal not displayed */
	// The YARN metrics.
	YarnMetrics          map[string]int64 `` /* 183-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
	XXX_unrecognized     []byte           `json:"-"`
	XXX_sizecache        int32            `json:"-"`
}

Contains cluster daemon metrics, such as HDFS and YARN stats.

**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.

func (*ClusterMetrics) Descriptor

func (*ClusterMetrics) Descriptor() ([]byte, []int)

func (*ClusterMetrics) GetHdfsMetrics

func (m *ClusterMetrics) GetHdfsMetrics() map[string]int64

func (*ClusterMetrics) GetYarnMetrics

func (m *ClusterMetrics) GetYarnMetrics() map[string]int64

func (*ClusterMetrics) ProtoMessage

func (*ClusterMetrics) ProtoMessage()

func (*ClusterMetrics) Reset

func (m *ClusterMetrics) Reset()

func (*ClusterMetrics) String

func (m *ClusterMetrics) String() string

func (*ClusterMetrics) XXX_DiscardUnknown

func (m *ClusterMetrics) XXX_DiscardUnknown()

func (*ClusterMetrics) XXX_Marshal

func (m *ClusterMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterMetrics) XXX_Merge

func (m *ClusterMetrics) XXX_Merge(src proto.Message)

func (*ClusterMetrics) XXX_Size

func (m *ClusterMetrics) XXX_Size() int

func (*ClusterMetrics) XXX_Unmarshal

func (m *ClusterMetrics) XXX_Unmarshal(b []byte) error

type ClusterOperation

type ClusterOperation struct {
	// Output only. The id of the cluster operation.
	OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
	// Output only. Error, if operation failed.
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. Indicates the operation is done.
	Done                 bool     `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The cluster operation triggered by a workflow.

func (*ClusterOperation) Descriptor

func (*ClusterOperation) Descriptor() ([]byte, []int)

func (*ClusterOperation) GetDone

func (m *ClusterOperation) GetDone() bool

func (*ClusterOperation) GetError

func (m *ClusterOperation) GetError() string

func (*ClusterOperation) GetOperationId

func (m *ClusterOperation) GetOperationId() string

func (*ClusterOperation) ProtoMessage

func (*ClusterOperation) ProtoMessage()

func (*ClusterOperation) Reset

func (m *ClusterOperation) Reset()

func (*ClusterOperation) String

func (m *ClusterOperation) String() string

func (*ClusterOperation) XXX_DiscardUnknown

func (m *ClusterOperation) XXX_DiscardUnknown()

func (*ClusterOperation) XXX_Marshal

func (m *ClusterOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterOperation) XXX_Merge

func (m *ClusterOperation) XXX_Merge(src proto.Message)

func (*ClusterOperation) XXX_Size

func (m *ClusterOperation) XXX_Size() int

func (*ClusterOperation) XXX_Unmarshal

func (m *ClusterOperation) XXX_Unmarshal(b []byte) error

type ClusterOperationMetadata

type ClusterOperationMetadata struct {
	// Output only. Name of the cluster for the operation.
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Output only. Cluster UUID for the operation.
	ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Output only. Current operation status.
	Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous operation status.
	StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. The operation type.
	OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType,proto3" json:"operation_type,omitempty"`
	// Output only. Short description of operation.
	Description string `protobuf:"bytes,12,opt,name=description,proto3" json:"description,omitempty"`
	// Output only. Labels associated with the operation
	Labels map[string]string `` /* 154-byte string literal not displayed */
	// Output only. Errors encountered during operation execution.
	Warnings             []string `protobuf:"bytes,14,rep,name=warnings,proto3" json:"warnings,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Metadata describing the operation.

func (*ClusterOperationMetadata) Descriptor

func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)

func (*ClusterOperationMetadata) GetClusterName

func (m *ClusterOperationMetadata) GetClusterName() string

func (*ClusterOperationMetadata) GetClusterUuid

func (m *ClusterOperationMetadata) GetClusterUuid() string

func (*ClusterOperationMetadata) GetDescription

func (m *ClusterOperationMetadata) GetDescription() string

func (*ClusterOperationMetadata) GetLabels

func (m *ClusterOperationMetadata) GetLabels() map[string]string

func (*ClusterOperationMetadata) GetOperationType

func (m *ClusterOperationMetadata) GetOperationType() string

func (*ClusterOperationMetadata) GetStatus

func (*ClusterOperationMetadata) GetStatusHistory

func (m *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus

func (*ClusterOperationMetadata) GetWarnings

func (m *ClusterOperationMetadata) GetWarnings() []string

func (*ClusterOperationMetadata) ProtoMessage

func (*ClusterOperationMetadata) ProtoMessage()

func (*ClusterOperationMetadata) Reset

func (m *ClusterOperationMetadata) Reset()

func (*ClusterOperationMetadata) String

func (m *ClusterOperationMetadata) String() string

func (*ClusterOperationMetadata) XXX_DiscardUnknown

func (m *ClusterOperationMetadata) XXX_DiscardUnknown()

func (*ClusterOperationMetadata) XXX_Marshal

func (m *ClusterOperationMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterOperationMetadata) XXX_Merge

func (m *ClusterOperationMetadata) XXX_Merge(src proto.Message)

func (*ClusterOperationMetadata) XXX_Size

func (m *ClusterOperationMetadata) XXX_Size() int

func (*ClusterOperationMetadata) XXX_Unmarshal

func (m *ClusterOperationMetadata) XXX_Unmarshal(b []byte) error

type ClusterOperationStatus

type ClusterOperationStatus struct {
	// Output only. A message containing the operation state.
	State ClusterOperationStatus_State `` /* 128-byte string literal not displayed */
	// Output only. A message containing the detailed operation state.
	InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState,proto3" json:"inner_state,omitempty"`
	// Output only. A message containing any operation metadata details.
	Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"`
	// Output only. The time this state was entered.
	StateStartTime       *timestamp.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

The status of the operation.

func (*ClusterOperationStatus) Descriptor

func (*ClusterOperationStatus) Descriptor() ([]byte, []int)

func (*ClusterOperationStatus) GetDetails

func (m *ClusterOperationStatus) GetDetails() string

func (*ClusterOperationStatus) GetInnerState

func (m *ClusterOperationStatus) GetInnerState() string

func (*ClusterOperationStatus) GetState

func (*ClusterOperationStatus) GetStateStartTime

func (m *ClusterOperationStatus) GetStateStartTime() *timestamp.Timestamp

func (*ClusterOperationStatus) ProtoMessage

func (*ClusterOperationStatus) ProtoMessage()

func (*ClusterOperationStatus) Reset

func (m *ClusterOperationStatus) Reset()

func (*ClusterOperationStatus) String

func (m *ClusterOperationStatus) String() string

func (*ClusterOperationStatus) XXX_DiscardUnknown

func (m *ClusterOperationStatus) XXX_DiscardUnknown()

func (*ClusterOperationStatus) XXX_Marshal

func (m *ClusterOperationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterOperationStatus) XXX_Merge

func (m *ClusterOperationStatus) XXX_Merge(src proto.Message)

func (*ClusterOperationStatus) XXX_Size

func (m *ClusterOperationStatus) XXX_Size() int

func (*ClusterOperationStatus) XXX_Unmarshal

func (m *ClusterOperationStatus) XXX_Unmarshal(b []byte) error

type ClusterOperationStatus_State

type ClusterOperationStatus_State int32

The operation state.

const (
	// Unused.
	ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0
	// The operation has been created.
	ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1
	// The operation is running.
	ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2
	// The operation is done; either cancelled or completed.
	ClusterOperationStatus_DONE ClusterOperationStatus_State = 3
)

func (ClusterOperationStatus_State) EnumDescriptor

func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int)

func (ClusterOperationStatus_State) String

type ClusterSelector

type ClusterSelector struct {
	// Optional. The zone where workflow process executes. This parameter does not
	// affect the selection of the cluster.
	//
	// If unspecified, the zone of the first cluster matching the selector
	// is used.
	Zone string `protobuf:"bytes,1,opt,name=zone,proto3" json:"zone,omitempty"`
	// Required. The cluster labels. Cluster must have all labels
	// to match.
	ClusterLabels        map[string]string `` /* 188-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

A selector that chooses target cluster for jobs based on metadata.

func (*ClusterSelector) Descriptor

func (*ClusterSelector) Descriptor() ([]byte, []int)

func (*ClusterSelector) GetClusterLabels

func (m *ClusterSelector) GetClusterLabels() map[string]string

func (*ClusterSelector) GetZone

func (m *ClusterSelector) GetZone() string

func (*ClusterSelector) ProtoMessage

func (*ClusterSelector) ProtoMessage()

func (*ClusterSelector) Reset

func (m *ClusterSelector) Reset()

func (*ClusterSelector) String

func (m *ClusterSelector) String() string

func (*ClusterSelector) XXX_DiscardUnknown

func (m *ClusterSelector) XXX_DiscardUnknown()

func (*ClusterSelector) XXX_Marshal

func (m *ClusterSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterSelector) XXX_Merge

func (m *ClusterSelector) XXX_Merge(src proto.Message)

func (*ClusterSelector) XXX_Size

func (m *ClusterSelector) XXX_Size() int

func (*ClusterSelector) XXX_Unmarshal

func (m *ClusterSelector) XXX_Unmarshal(b []byte) error

type ClusterStatus

type ClusterStatus struct {
	// Output only. The cluster's state.
	State ClusterStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.ClusterStatus_State" json:"state,omitempty"`
	// Output only. Optional details of cluster's state.
	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
	// Output only. Time when this state was entered.
	StateStartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information that includes
	// status reported by the agent.
	Substate             ClusterStatus_Substate `` /* 128-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
	XXX_unrecognized     []byte                 `json:"-"`
	XXX_sizecache        int32                  `json:"-"`
}

The status of a cluster and its instances.

func (*ClusterStatus) Descriptor

func (*ClusterStatus) Descriptor() ([]byte, []int)

func (*ClusterStatus) GetDetail

func (m *ClusterStatus) GetDetail() string

func (*ClusterStatus) GetState

func (m *ClusterStatus) GetState() ClusterStatus_State

func (*ClusterStatus) GetStateStartTime

func (m *ClusterStatus) GetStateStartTime() *timestamp.Timestamp

func (*ClusterStatus) GetSubstate

func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate

func (*ClusterStatus) ProtoMessage

func (*ClusterStatus) ProtoMessage()

func (*ClusterStatus) Reset

func (m *ClusterStatus) Reset()

func (*ClusterStatus) String

func (m *ClusterStatus) String() string

func (*ClusterStatus) XXX_DiscardUnknown

func (m *ClusterStatus) XXX_DiscardUnknown()

func (*ClusterStatus) XXX_Marshal

func (m *ClusterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterStatus) XXX_Merge

func (m *ClusterStatus) XXX_Merge(src proto.Message)

func (*ClusterStatus) XXX_Size

func (m *ClusterStatus) XXX_Size() int

func (*ClusterStatus) XXX_Unmarshal

func (m *ClusterStatus) XXX_Unmarshal(b []byte) error

type ClusterStatus_State

type ClusterStatus_State int32

The cluster state.

const (
	// The cluster state is unknown.
	ClusterStatus_UNKNOWN ClusterStatus_State = 0
	// The cluster is being created and set up. It is not ready for use.
	ClusterStatus_CREATING ClusterStatus_State = 1
	// The cluster is currently running and healthy. It is ready for use.
	ClusterStatus_RUNNING ClusterStatus_State = 2
	// The cluster encountered an error. It is not ready for use.
	ClusterStatus_ERROR ClusterStatus_State = 3
	// The cluster is being deleted. It cannot be used.
	ClusterStatus_DELETING ClusterStatus_State = 4
	// The cluster is being updated. It continues to accept and process jobs.
	ClusterStatus_UPDATING ClusterStatus_State = 5
)

func (ClusterStatus_State) EnumDescriptor

func (ClusterStatus_State) EnumDescriptor() ([]byte, []int)

func (ClusterStatus_State) String

func (x ClusterStatus_State) String() string

type ClusterStatus_Substate

type ClusterStatus_Substate int32

The cluster substate.

const (
	// The cluster substate is unknown.
	ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
	// The cluster is known to be in an unhealthy state
	// (for example, critical daemons are not running or HDFS capacity is
	// exhausted).
	//
	// Applies to RUNNING state.
	ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
	// The agent-reported status is out of date (may occur if
	// Cloud Dataproc loses communication with Agent).
	//
	// Applies to RUNNING state.
	ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)

func (ClusterStatus_Substate) EnumDescriptor

func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int)

func (ClusterStatus_Substate) String

func (x ClusterStatus_Substate) String() string

type CreateClusterRequest

type CreateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster to create.
	Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
	// is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to create a cluster.

func (*CreateClusterRequest) Descriptor

func (*CreateClusterRequest) Descriptor() ([]byte, []int)

func (*CreateClusterRequest) GetCluster

func (m *CreateClusterRequest) GetCluster() *Cluster

func (*CreateClusterRequest) GetProjectId

func (m *CreateClusterRequest) GetProjectId() string

func (*CreateClusterRequest) GetRegion

func (m *CreateClusterRequest) GetRegion() string

func (*CreateClusterRequest) GetRequestId

func (m *CreateClusterRequest) GetRequestId() string

func (*CreateClusterRequest) ProtoMessage

func (*CreateClusterRequest) ProtoMessage()

func (*CreateClusterRequest) Reset

func (m *CreateClusterRequest) Reset()

func (*CreateClusterRequest) String

func (m *CreateClusterRequest) String() string

func (*CreateClusterRequest) XXX_DiscardUnknown

func (m *CreateClusterRequest) XXX_DiscardUnknown()

func (*CreateClusterRequest) XXX_Marshal

func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CreateClusterRequest) XXX_Merge

func (m *CreateClusterRequest) XXX_Merge(src proto.Message)

func (*CreateClusterRequest) XXX_Size

func (m *CreateClusterRequest) XXX_Size() int

func (*CreateClusterRequest) XXX_Unmarshal

func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error

type CreateWorkflowTemplateRequest

type CreateWorkflowTemplateRequest struct {
	// Required. The "resource name" of the region, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The Dataproc workflow template to create.
	Template             *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

A request to create a workflow template.

func (*CreateWorkflowTemplateRequest) Descriptor

func (*CreateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*CreateWorkflowTemplateRequest) GetParent

func (m *CreateWorkflowTemplateRequest) GetParent() string

func (*CreateWorkflowTemplateRequest) GetTemplate

func (*CreateWorkflowTemplateRequest) ProtoMessage

func (*CreateWorkflowTemplateRequest) ProtoMessage()

func (*CreateWorkflowTemplateRequest) Reset

func (m *CreateWorkflowTemplateRequest) Reset()

func (*CreateWorkflowTemplateRequest) String

func (*CreateWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *CreateWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*CreateWorkflowTemplateRequest) XXX_Marshal

func (m *CreateWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CreateWorkflowTemplateRequest) XXX_Merge

func (m *CreateWorkflowTemplateRequest) XXX_Merge(src proto.Message)

func (*CreateWorkflowTemplateRequest) XXX_Size

func (m *CreateWorkflowTemplateRequest) XXX_Size() int

func (*CreateWorkflowTemplateRequest) XXX_Unmarshal

func (m *CreateWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type DeleteClusterRequest

type DeleteClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifying the `cluster_uuid` means the RPC should fail
	// (with error NOT_FOUND) if cluster with specified UUID does not exist.
	ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
	// backend is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to delete a cluster.

func (*DeleteClusterRequest) Descriptor

func (*DeleteClusterRequest) Descriptor() ([]byte, []int)

func (*DeleteClusterRequest) GetClusterName

func (m *DeleteClusterRequest) GetClusterName() string

func (*DeleteClusterRequest) GetClusterUuid

func (m *DeleteClusterRequest) GetClusterUuid() string

func (*DeleteClusterRequest) GetProjectId

func (m *DeleteClusterRequest) GetProjectId() string

func (*DeleteClusterRequest) GetRegion

func (m *DeleteClusterRequest) GetRegion() string

func (*DeleteClusterRequest) GetRequestId

func (m *DeleteClusterRequest) GetRequestId() string

func (*DeleteClusterRequest) ProtoMessage

func (*DeleteClusterRequest) ProtoMessage()

func (*DeleteClusterRequest) Reset

func (m *DeleteClusterRequest) Reset()

func (*DeleteClusterRequest) String

func (m *DeleteClusterRequest) String() string

func (*DeleteClusterRequest) XXX_DiscardUnknown

func (m *DeleteClusterRequest) XXX_DiscardUnknown()

func (*DeleteClusterRequest) XXX_Marshal

func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeleteClusterRequest) XXX_Merge

func (m *DeleteClusterRequest) XXX_Merge(src proto.Message)

func (*DeleteClusterRequest) XXX_Size

func (m *DeleteClusterRequest) XXX_Size() int

func (*DeleteClusterRequest) XXX_Unmarshal

func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error

type DeleteJobRequest

type DeleteJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId                string   `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to delete a job.

func (*DeleteJobRequest) Descriptor

func (*DeleteJobRequest) Descriptor() ([]byte, []int)

func (*DeleteJobRequest) GetJobId

func (m *DeleteJobRequest) GetJobId() string

func (*DeleteJobRequest) GetProjectId

func (m *DeleteJobRequest) GetProjectId() string

func (*DeleteJobRequest) GetRegion

func (m *DeleteJobRequest) GetRegion() string

func (*DeleteJobRequest) ProtoMessage

func (*DeleteJobRequest) ProtoMessage()

func (*DeleteJobRequest) Reset

func (m *DeleteJobRequest) Reset()

func (*DeleteJobRequest) String

func (m *DeleteJobRequest) String() string

func (*DeleteJobRequest) XXX_DiscardUnknown

func (m *DeleteJobRequest) XXX_DiscardUnknown()

func (*DeleteJobRequest) XXX_Marshal

func (m *DeleteJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeleteJobRequest) XXX_Merge

func (m *DeleteJobRequest) XXX_Merge(src proto.Message)

func (*DeleteJobRequest) XXX_Size

func (m *DeleteJobRequest) XXX_Size() int

func (*DeleteJobRequest) XXX_Unmarshal

func (m *DeleteJobRequest) XXX_Unmarshal(b []byte) error

type DeleteWorkflowTemplateRequest

type DeleteWorkflowTemplateRequest struct {
	// Required. The "resource name" of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The version of workflow template to delete. If specified,
	// will only delete the template if the current server version matches
	// specified version.
	Version              int32    `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to delete a workflow template.

Currently started workflows will remain running.

func (*DeleteWorkflowTemplateRequest) Descriptor

func (*DeleteWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*DeleteWorkflowTemplateRequest) GetName

func (*DeleteWorkflowTemplateRequest) GetVersion

func (m *DeleteWorkflowTemplateRequest) GetVersion() int32

func (*DeleteWorkflowTemplateRequest) ProtoMessage

func (*DeleteWorkflowTemplateRequest) ProtoMessage()

func (*DeleteWorkflowTemplateRequest) Reset

func (m *DeleteWorkflowTemplateRequest) Reset()

func (*DeleteWorkflowTemplateRequest) String

func (*DeleteWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *DeleteWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*DeleteWorkflowTemplateRequest) XXX_Marshal

func (m *DeleteWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeleteWorkflowTemplateRequest) XXX_Merge

func (m *DeleteWorkflowTemplateRequest) XXX_Merge(src proto.Message)

func (*DeleteWorkflowTemplateRequest) XXX_Size

func (m *DeleteWorkflowTemplateRequest) XXX_Size() int

func (*DeleteWorkflowTemplateRequest) XXX_Unmarshal

func (m *DeleteWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type DiagnoseClusterRequest

type DiagnoseClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName          string   `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to collect cluster diagnostic information.

func (*DiagnoseClusterRequest) Descriptor

func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int)

func (*DiagnoseClusterRequest) GetClusterName

func (m *DiagnoseClusterRequest) GetClusterName() string

func (*DiagnoseClusterRequest) GetProjectId

func (m *DiagnoseClusterRequest) GetProjectId() string

func (*DiagnoseClusterRequest) GetRegion

func (m *DiagnoseClusterRequest) GetRegion() string

func (*DiagnoseClusterRequest) ProtoMessage

func (*DiagnoseClusterRequest) ProtoMessage()

func (*DiagnoseClusterRequest) Reset

func (m *DiagnoseClusterRequest) Reset()

func (*DiagnoseClusterRequest) String

func (m *DiagnoseClusterRequest) String() string

func (*DiagnoseClusterRequest) XXX_DiscardUnknown

func (m *DiagnoseClusterRequest) XXX_DiscardUnknown()

func (*DiagnoseClusterRequest) XXX_Marshal

func (m *DiagnoseClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DiagnoseClusterRequest) XXX_Merge

func (m *DiagnoseClusterRequest) XXX_Merge(src proto.Message)

func (*DiagnoseClusterRequest) XXX_Size

func (m *DiagnoseClusterRequest) XXX_Size() int

func (*DiagnoseClusterRequest) XXX_Unmarshal

func (m *DiagnoseClusterRequest) XXX_Unmarshal(b []byte) error

type DiagnoseClusterResults

type DiagnoseClusterResults struct {
	// Output only. The Cloud Storage URI of the diagnostic output.
	// The output report is a plain text file with a summary of collected
	// diagnostics.
	OutputUri            string   `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The location of diagnostic output.

func (*DiagnoseClusterResults) Descriptor

func (*DiagnoseClusterResults) Descriptor() ([]byte, []int)

func (*DiagnoseClusterResults) GetOutputUri

func (m *DiagnoseClusterResults) GetOutputUri() string

func (*DiagnoseClusterResults) ProtoMessage

func (*DiagnoseClusterResults) ProtoMessage()

func (*DiagnoseClusterResults) Reset

func (m *DiagnoseClusterResults) Reset()

func (*DiagnoseClusterResults) String

func (m *DiagnoseClusterResults) String() string

func (*DiagnoseClusterResults) XXX_DiscardUnknown

func (m *DiagnoseClusterResults) XXX_DiscardUnknown()

func (*DiagnoseClusterResults) XXX_Marshal

func (m *DiagnoseClusterResults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DiagnoseClusterResults) XXX_Merge

func (m *DiagnoseClusterResults) XXX_Merge(src proto.Message)

func (*DiagnoseClusterResults) XXX_Size

func (m *DiagnoseClusterResults) XXX_Size() int

func (*DiagnoseClusterResults) XXX_Unmarshal

func (m *DiagnoseClusterResults) XXX_Unmarshal(b []byte) error

type DiskConfig

type DiskConfig struct {
	// Optional. Type of the boot disk (default is "pd-standard").
	// Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
	// "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType string `protobuf:"bytes,3,opt,name=boot_disk_type,json=bootDiskType,proto3" json:"boot_disk_type,omitempty"`
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb,proto3" json:"boot_disk_size_gb,omitempty"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0).
	// If SSDs are not attached, the boot disk is used to store runtime logs and
	// [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
	// If one or more SSDs are attached, this runtime bulk
	// data is spread across them, and the boot disk contains only basic
	// config and installed binaries.
	NumLocalSsds         int32    `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds,proto3" json:"num_local_ssds,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Specifies the config of disk options for a group of VM instances.

func (*DiskConfig) Descriptor

func (*DiskConfig) Descriptor() ([]byte, []int)

func (*DiskConfig) GetBootDiskSizeGb

func (m *DiskConfig) GetBootDiskSizeGb() int32

func (*DiskConfig) GetBootDiskType

func (m *DiskConfig) GetBootDiskType() string

func (*DiskConfig) GetNumLocalSsds

func (m *DiskConfig) GetNumLocalSsds() int32

func (*DiskConfig) ProtoMessage

func (*DiskConfig) ProtoMessage()

func (*DiskConfig) Reset

func (m *DiskConfig) Reset()

func (*DiskConfig) String

func (m *DiskConfig) String() string

func (*DiskConfig) XXX_DiscardUnknown

func (m *DiskConfig) XXX_DiscardUnknown()

func (*DiskConfig) XXX_Marshal

func (m *DiskConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DiskConfig) XXX_Merge

func (m *DiskConfig) XXX_Merge(src proto.Message)

func (*DiskConfig) XXX_Size

func (m *DiskConfig) XXX_Size() int

func (*DiskConfig) XXX_Unmarshal

func (m *DiskConfig) XXX_Unmarshal(b []byte) error

type GceClusterConfig

type GceClusterConfig struct {
	// Optional. The zone where the Compute Engine cluster will be located.
	// On a create request, it is required in the "global" region. If omitted
	// in a non-global Cloud Dataproc region, the service will pick a zone in the
	// corresponding Compute Engine region. On a get request, zone will always be
	// present.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
	// * `projects/[project_id]/zones/[zone]`
	// * `us-central1-f`
	ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri,proto3" json:"zone_uri,omitempty"`
	// Optional. The Compute Engine network to be used for machine
	// communications. Cannot be specified with subnetwork_uri. If neither
	// `network_uri` nor `subnetwork_uri` is specified, the "default" network of
	// the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
	// [Using Subnetworks](/compute/docs/subnetworks) for more information).
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
	// * `projects/[project_id]/regions/global/default`
	// * `default`
	NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri,proto3" json:"network_uri,omitempty"`
	// Optional. The Compute Engine subnetwork to be used for machine
	// communications. Cannot be specified with network_uri.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
	// * `projects/[project_id]/regions/us-east1/sub0`
	// * `sub0`
	SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri,proto3" json:"subnetwork_uri,omitempty"`
	// Optional. If true, all instances in the cluster will only have internal IP
	// addresses. By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance.
	// This `internal_ip_only` restriction can only be enabled for subnetwork
	// enabled networks, and all off-cluster dependencies must be configured to be
	// accessible without external IP addresses.
	InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly,proto3" json:"internal_ip_only,omitempty"`
	// Optional. The service account of the instances. Defaults to the default
	// Compute Engine service account. Custom service accounts need
	// permissions equivalent to the following IAM roles:
	//
	// * roles/logging.logWriter
	// * roles/storage.objectAdmin
	//
	// (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
	// for more information).
	// Example: `[account_id]@[project_id].iam.gserviceaccount.com`
	ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
	// Optional. The URIs of service account scopes to be included in
	// Compute Engine instances. The following base set of scopes is always
	// included:
	//
	// * https://www.googleapis.com/auth/cloud.useraccounts.readonly
	// * https://www.googleapis.com/auth/devstorage.read_write
	// * https://www.googleapis.com/auth/logging.write
	//
	// If no scopes are specified, the following defaults are also provided:
	//
	// * https://www.googleapis.com/auth/bigquery
	// * https://www.googleapis.com/auth/bigtable.admin.table
	// * https://www.googleapis.com/auth/bigtable.data
	// * https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes,proto3" json:"service_account_scopes,omitempty"`
	// The Compute Engine tags to add to all instances (see
	// [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
	Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`
	// The Compute Engine metadata entries to add to all instances (see
	// [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata             map[string]string `` /* 157-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.

func (*GceClusterConfig) Descriptor

func (*GceClusterConfig) Descriptor() ([]byte, []int)

func (*GceClusterConfig) GetInternalIpOnly

func (m *GceClusterConfig) GetInternalIpOnly() bool

func (*GceClusterConfig) GetMetadata

func (m *GceClusterConfig) GetMetadata() map[string]string

func (*GceClusterConfig) GetNetworkUri

func (m *GceClusterConfig) GetNetworkUri() string

func (*GceClusterConfig) GetServiceAccount

func (m *GceClusterConfig) GetServiceAccount() string

func (*GceClusterConfig) GetServiceAccountScopes

func (m *GceClusterConfig) GetServiceAccountScopes() []string

func (*GceClusterConfig) GetSubnetworkUri

func (m *GceClusterConfig) GetSubnetworkUri() string

func (*GceClusterConfig) GetTags

func (m *GceClusterConfig) GetTags() []string

func (*GceClusterConfig) GetZoneUri

func (m *GceClusterConfig) GetZoneUri() string

func (*GceClusterConfig) ProtoMessage

func (*GceClusterConfig) ProtoMessage()

func (*GceClusterConfig) Reset

func (m *GceClusterConfig) Reset()

func (*GceClusterConfig) String

func (m *GceClusterConfig) String() string

func (*GceClusterConfig) XXX_DiscardUnknown

func (m *GceClusterConfig) XXX_DiscardUnknown()

func (*GceClusterConfig) XXX_Marshal

func (m *GceClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GceClusterConfig) XXX_Merge

func (m *GceClusterConfig) XXX_Merge(src proto.Message)

func (*GceClusterConfig) XXX_Size

func (m *GceClusterConfig) XXX_Size() int

func (*GceClusterConfig) XXX_Unmarshal

func (m *GceClusterConfig) XXX_Unmarshal(b []byte) error

type GetClusterRequest

type GetClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName          string   `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Request to get the resource representation for a cluster in a project.

func (*GetClusterRequest) Descriptor

func (*GetClusterRequest) Descriptor() ([]byte, []int)

func (*GetClusterRequest) GetClusterName

func (m *GetClusterRequest) GetClusterName() string

func (*GetClusterRequest) GetProjectId

func (m *GetClusterRequest) GetProjectId() string

func (*GetClusterRequest) GetRegion

func (m *GetClusterRequest) GetRegion() string

func (*GetClusterRequest) ProtoMessage

func (*GetClusterRequest) ProtoMessage()

func (*GetClusterRequest) Reset

func (m *GetClusterRequest) Reset()

func (*GetClusterRequest) String

func (m *GetClusterRequest) String() string

func (*GetClusterRequest) XXX_DiscardUnknown

func (m *GetClusterRequest) XXX_DiscardUnknown()

func (*GetClusterRequest) XXX_Marshal

func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GetClusterRequest) XXX_Merge

func (m *GetClusterRequest) XXX_Merge(src proto.Message)

func (*GetClusterRequest) XXX_Size

func (m *GetClusterRequest) XXX_Size() int

func (*GetClusterRequest) XXX_Unmarshal

func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error

type GetJobRequest

type GetJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId                string   `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to get the resource representation for a job in a project.

func (*GetJobRequest) Descriptor

func (*GetJobRequest) Descriptor() ([]byte, []int)

func (*GetJobRequest) GetJobId

func (m *GetJobRequest) GetJobId() string

func (*GetJobRequest) GetProjectId

func (m *GetJobRequest) GetProjectId() string

func (*GetJobRequest) GetRegion

func (m *GetJobRequest) GetRegion() string

func (*GetJobRequest) ProtoMessage

func (*GetJobRequest) ProtoMessage()

func (*GetJobRequest) Reset

func (m *GetJobRequest) Reset()

func (*GetJobRequest) String

func (m *GetJobRequest) String() string

func (*GetJobRequest) XXX_DiscardUnknown

func (m *GetJobRequest) XXX_DiscardUnknown()

func (*GetJobRequest) XXX_Marshal

func (m *GetJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GetJobRequest) XXX_Merge

func (m *GetJobRequest) XXX_Merge(src proto.Message)

func (*GetJobRequest) XXX_Size

func (m *GetJobRequest) XXX_Size() int

func (*GetJobRequest) XXX_Unmarshal

func (m *GetJobRequest) XXX_Unmarshal(b []byte) error

type GetWorkflowTemplateRequest

type GetWorkflowTemplateRequest struct {
	// Required. The "resource name" of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The version of workflow template to retrieve. Only previously
	// instantiated versions can be retrieved.
	//
	// If unspecified, retrieves the current version.
	Version              int32    `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to fetch a workflow template.

func (*GetWorkflowTemplateRequest) Descriptor

func (*GetWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*GetWorkflowTemplateRequest) GetName

func (m *GetWorkflowTemplateRequest) GetName() string

func (*GetWorkflowTemplateRequest) GetVersion

func (m *GetWorkflowTemplateRequest) GetVersion() int32

func (*GetWorkflowTemplateRequest) ProtoMessage

func (*GetWorkflowTemplateRequest) ProtoMessage()

func (*GetWorkflowTemplateRequest) Reset

func (m *GetWorkflowTemplateRequest) Reset()

func (*GetWorkflowTemplateRequest) String

func (m *GetWorkflowTemplateRequest) String() string

func (*GetWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *GetWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*GetWorkflowTemplateRequest) XXX_Marshal

func (m *GetWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GetWorkflowTemplateRequest) XXX_Merge

func (m *GetWorkflowTemplateRequest) XXX_Merge(src proto.Message)

func (*GetWorkflowTemplateRequest) XXX_Size

func (m *GetWorkflowTemplateRequest) XXX_Size() int

func (*GetWorkflowTemplateRequest) XXX_Unmarshal

func (m *GetWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type HadoopJob

type HadoopJob struct {
	// Required. Indicates the location of the driver's main class. Specify
	// either the jar file that contains the main class or the main class name.
	// To specify both, add the jar file to `jar_file_uris`, and then specify
	// the main class name in this property.
	//
	// Types that are valid to be assigned to Driver:
	//	*HadoopJob_MainJarFileUri
	//	*HadoopJob_MainClass
	Driver isHadoopJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not
	// include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
	// properties, since a collision may occur that causes an incorrect job
	// submission.
	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. Jar file URIs to add to the CLASSPATHs of the
	// Hadoop driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
	// to the working directory of Hadoop drivers and distributed tasks. Useful
	// for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory of
	// Hadoop drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, or .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Hadoop.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site and
	// classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).

func (*HadoopJob) Descriptor

func (*HadoopJob) Descriptor() ([]byte, []int)

func (*HadoopJob) GetArchiveUris

func (m *HadoopJob) GetArchiveUris() []string

func (*HadoopJob) GetArgs

func (m *HadoopJob) GetArgs() []string

func (*HadoopJob) GetDriver

func (m *HadoopJob) GetDriver() isHadoopJob_Driver

func (*HadoopJob) GetFileUris

func (m *HadoopJob) GetFileUris() []string

func (*HadoopJob) GetJarFileUris

func (m *HadoopJob) GetJarFileUris() []string

func (*HadoopJob) GetLoggingConfig

func (m *HadoopJob) GetLoggingConfig() *LoggingConfig

func (*HadoopJob) GetMainClass

func (m *HadoopJob) GetMainClass() string

func (*HadoopJob) GetMainJarFileUri

func (m *HadoopJob) GetMainJarFileUri() string

func (*HadoopJob) GetProperties

func (m *HadoopJob) GetProperties() map[string]string

func (*HadoopJob) ProtoMessage

func (*HadoopJob) ProtoMessage()

func (*HadoopJob) Reset

func (m *HadoopJob) Reset()

func (*HadoopJob) String

func (m *HadoopJob) String() string

func (*HadoopJob) XXX_DiscardUnknown

func (m *HadoopJob) XXX_DiscardUnknown()

func (*HadoopJob) XXX_Marshal

func (m *HadoopJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HadoopJob) XXX_Merge

func (m *HadoopJob) XXX_Merge(src proto.Message)

func (*HadoopJob) XXX_OneofFuncs

func (*HadoopJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*HadoopJob) XXX_Size

func (m *HadoopJob) XXX_Size() int

func (*HadoopJob) XXX_Unmarshal

func (m *HadoopJob) XXX_Unmarshal(b []byte) error

type HadoopJob_MainClass

type HadoopJob_MainClass struct {
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

type HadoopJob_MainJarFileUri

type HadoopJob_MainJarFileUri struct {
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

type HiveJob

type HiveJob struct {
	// Required. The sequence of Hive queries to execute, specified as either
	// an HCFS file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*HiveJob_QueryFileUri
	//	*HiveJob_QueryList
	Queries isHiveJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when executing
	// independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Hive command: `SET name="value";`).
	ScriptVariables map[string]string `` /* 194-byte string literal not displayed */
	// Optional. A mapping of property names and values, used to configure Hive.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/hive/conf/hive-site.xml, and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the
	// Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
	// and UDFs.
	JarFileUris          []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN.

func (*HiveJob) Descriptor

func (*HiveJob) Descriptor() ([]byte, []int)

func (*HiveJob) GetContinueOnFailure

func (m *HiveJob) GetContinueOnFailure() bool

func (*HiveJob) GetJarFileUris

func (m *HiveJob) GetJarFileUris() []string

func (*HiveJob) GetProperties

func (m *HiveJob) GetProperties() map[string]string

func (*HiveJob) GetQueries

func (m *HiveJob) GetQueries() isHiveJob_Queries

func (*HiveJob) GetQueryFileUri

func (m *HiveJob) GetQueryFileUri() string

func (*HiveJob) GetQueryList

func (m *HiveJob) GetQueryList() *QueryList

func (*HiveJob) GetScriptVariables

func (m *HiveJob) GetScriptVariables() map[string]string

func (*HiveJob) ProtoMessage

func (*HiveJob) ProtoMessage()

func (*HiveJob) Reset

func (m *HiveJob) Reset()

func (*HiveJob) String

func (m *HiveJob) String() string

func (*HiveJob) XXX_DiscardUnknown

func (m *HiveJob) XXX_DiscardUnknown()

func (*HiveJob) XXX_Marshal

func (m *HiveJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HiveJob) XXX_Merge

func (m *HiveJob) XXX_Merge(src proto.Message)

func (*HiveJob) XXX_OneofFuncs

func (*HiveJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*HiveJob) XXX_Size

func (m *HiveJob) XXX_Size() int

func (*HiveJob) XXX_Unmarshal

func (m *HiveJob) XXX_Unmarshal(b []byte) error

type HiveJob_QueryFileUri

type HiveJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

type HiveJob_QueryList

type HiveJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

type InstanceGroupConfig

type InstanceGroupConfig struct {
	// Optional. The number of VM instances in the instance group.
	// For master instance groups, must be set to 1.
	NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances,proto3" json:"num_instances,omitempty"`
	// Output only. The list of instance names. Cloud Dataproc derives the names
	// from `cluster_name`, `num_instances`, and the instance group.
	InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames,proto3" json:"instance_names,omitempty"`
	// Output only. The Compute Engine image resource used for cluster
	// instances. Inferred from `SoftwareConfig.image_version`.
	ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri,proto3" json:"image_uri,omitempty"`
	// Optional. The Compute Engine machine type used for cluster instances.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `n1-standard-2`
	//
	// **Auto Zone Exception**: If you are using the Cloud Dataproc
	// [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
	// feature, you must use the short name of the machine type
	// resource, for example, `n1-standard-2`.
	MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri,proto3" json:"machine_type_uri,omitempty"`
	// Optional. Disk option config settings.
	DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig,proto3" json:"disk_config,omitempty"`
	// Optional. Specifies that this instance group contains preemptible instances.
	IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible,proto3" json:"is_preemptible,omitempty"`
	// Output only. The config for Compute Engine Instance Group
	// Manager that manages this group.
	// This is only used for preemptible instance groups.
	ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig,proto3" json:"managed_group_config,omitempty"`
	// Optional. The Compute Engine accelerator configuration for these
	// instances.
	//
	// **Beta Feature**: This feature is still under development. It may be
	// changed before final release.
	Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators,proto3" json:"accelerators,omitempty"`
	// Optional. Specifies the minimum cpu platform for the Instance Group.
	// See [Cloud Dataproc → Minimum CPU Platform]
	// (/dataproc/docs/concepts/compute/dataproc-min-cpu).
	MinCpuPlatform       string   `protobuf:"bytes,9,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Optional. The config settings for Compute Engine resources in an instance group, such as a master or worker group.

func (*InstanceGroupConfig) Descriptor

func (*InstanceGroupConfig) Descriptor() ([]byte, []int)

func (*InstanceGroupConfig) GetAccelerators

func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig

func (*InstanceGroupConfig) GetDiskConfig

func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig

func (*InstanceGroupConfig) GetImageUri

func (m *InstanceGroupConfig) GetImageUri() string

func (*InstanceGroupConfig) GetInstanceNames

func (m *InstanceGroupConfig) GetInstanceNames() []string

func (*InstanceGroupConfig) GetIsPreemptible

func (m *InstanceGroupConfig) GetIsPreemptible() bool

func (*InstanceGroupConfig) GetMachineTypeUri

func (m *InstanceGroupConfig) GetMachineTypeUri() string

func (*InstanceGroupConfig) GetManagedGroupConfig

func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig

func (*InstanceGroupConfig) GetMinCpuPlatform

func (m *InstanceGroupConfig) GetMinCpuPlatform() string

func (*InstanceGroupConfig) GetNumInstances

func (m *InstanceGroupConfig) GetNumInstances() int32

func (*InstanceGroupConfig) ProtoMessage

func (*InstanceGroupConfig) ProtoMessage()

func (*InstanceGroupConfig) Reset

func (m *InstanceGroupConfig) Reset()

func (*InstanceGroupConfig) String

func (m *InstanceGroupConfig) String() string

func (*InstanceGroupConfig) XXX_DiscardUnknown

func (m *InstanceGroupConfig) XXX_DiscardUnknown()

func (*InstanceGroupConfig) XXX_Marshal

func (m *InstanceGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InstanceGroupConfig) XXX_Merge

func (m *InstanceGroupConfig) XXX_Merge(src proto.Message)

func (*InstanceGroupConfig) XXX_Size

func (m *InstanceGroupConfig) XXX_Size() int

func (*InstanceGroupConfig) XXX_Unmarshal

func (m *InstanceGroupConfig) XXX_Unmarshal(b []byte) error

type InstantiateInlineWorkflowTemplateRequest

type InstantiateInlineWorkflowTemplateRequest struct {
	// Required. The "resource name" of the workflow template region, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The workflow template to instantiate.
	Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
	// Optional. A tag that prevents multiple concurrent workflow
	// instances with the same tag from running. This mitigates risk of
	// concurrent instances started due to retries.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The tag must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	InstanceId           string   `protobuf:"bytes,3,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to instantiate an inline workflow template.

func (*InstantiateInlineWorkflowTemplateRequest) Descriptor

func (*InstantiateInlineWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*InstantiateInlineWorkflowTemplateRequest) GetInstanceId

func (*InstantiateInlineWorkflowTemplateRequest) GetParent

func (*InstantiateInlineWorkflowTemplateRequest) GetTemplate

func (*InstantiateInlineWorkflowTemplateRequest) ProtoMessage

func (*InstantiateInlineWorkflowTemplateRequest) Reset

func (*InstantiateInlineWorkflowTemplateRequest) String

func (*InstantiateInlineWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *InstantiateInlineWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*InstantiateInlineWorkflowTemplateRequest) XXX_Marshal

func (m *InstantiateInlineWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InstantiateInlineWorkflowTemplateRequest) XXX_Merge

func (*InstantiateInlineWorkflowTemplateRequest) XXX_Size

func (*InstantiateInlineWorkflowTemplateRequest) XXX_Unmarshal

func (m *InstantiateInlineWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type InstantiateWorkflowTemplateRequest

type InstantiateWorkflowTemplateRequest struct {
	// Required. The "resource name" of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The version of workflow template to instantiate. If specified,
	// the workflow will be instantiated only if the current version of
	// the workflow template has the supplied version.
	//
	// This option cannot be used to instantiate a previous version of
	// workflow template.
	Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// Optional. A tag that prevents multiple concurrent workflow
	// instances with the same tag from running. This mitigates risk of
	// concurrent instances started due to retries.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The tag must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	InstanceId           string   `protobuf:"bytes,3,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to instantiate a workflow template.

func (*InstantiateWorkflowTemplateRequest) Descriptor

func (*InstantiateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*InstantiateWorkflowTemplateRequest) GetInstanceId

func (m *InstantiateWorkflowTemplateRequest) GetInstanceId() string

func (*InstantiateWorkflowTemplateRequest) GetName

func (*InstantiateWorkflowTemplateRequest) GetVersion

func (m *InstantiateWorkflowTemplateRequest) GetVersion() int32

func (*InstantiateWorkflowTemplateRequest) ProtoMessage

func (*InstantiateWorkflowTemplateRequest) ProtoMessage()

func (*InstantiateWorkflowTemplateRequest) Reset

func (*InstantiateWorkflowTemplateRequest) String

func (*InstantiateWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *InstantiateWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*InstantiateWorkflowTemplateRequest) XXX_Marshal

func (m *InstantiateWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InstantiateWorkflowTemplateRequest) XXX_Merge

func (*InstantiateWorkflowTemplateRequest) XXX_Size

func (*InstantiateWorkflowTemplateRequest) XXX_Unmarshal

func (m *InstantiateWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type Job

type Job struct {
	// Optional. The fully qualified reference to the job, which can be used to
	// obtain the equivalent REST path of the job resource. If this property
	// is not specified when a job is created, the server generates a
	// <code>job_id</code>.
	Reference *JobReference `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`
	// Required. Job information, including how, when, and where to
	// run the job.
	Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement,proto3" json:"placement,omitempty"`
	// Required. The application/framework-specific portion of the job.
	//
	// Types that are valid to be assigned to TypeJob:
	//	*Job_HadoopJob
	//	*Job_SparkJob
	//	*Job_PysparkJob
	//	*Job_HiveJob
	//	*Job_PigJob
	//	*Job_SparkSqlJob
	TypeJob isJob_TypeJob `protobuf_oneof:"type_job"`
	// Output only. The job status. Additional application-specific
	// status information may be contained in the <code>type_job</code>
	// and <code>yarn_applications</code> fields.
	Status *JobStatus `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous job status.
	StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. The collection of YARN applications spun up by this job.
	//
	// **Beta** Feature: This report is available for testing purposes only. It may
	// be changed before final release.
	YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications,proto3" json:"yarn_applications,omitempty"`
	// Output only. A URI pointing to the location of the stdout of the job's
	// driver program.
	DriverOutputResourceUri string `` /* 135-byte string literal not displayed */
	// Output only. If present, the location of miscellaneous control files
	// which may be used as part of job setup and handling. If not present,
	// control files may be placed in the same location as `driver_output_uri`.
	DriverControlFilesUri string `` /* 129-byte string literal not displayed */
	// Optional. The labels to associate with this job.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// No more than 32 labels can be associated with a job.
	Labels map[string]string `` /* 154-byte string literal not displayed */
	// Optional. Job scheduling configuration.
	Scheduling           *JobScheduling `protobuf:"bytes,20,opt,name=scheduling,proto3" json:"scheduling,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Cloud Dataproc job resource.

func (*Job) Descriptor

func (*Job) Descriptor() ([]byte, []int)

func (*Job) GetDriverControlFilesUri

func (m *Job) GetDriverControlFilesUri() string

func (*Job) GetDriverOutputResourceUri

func (m *Job) GetDriverOutputResourceUri() string

func (*Job) GetHadoopJob

func (m *Job) GetHadoopJob() *HadoopJob

func (*Job) GetHiveJob

func (m *Job) GetHiveJob() *HiveJob

func (*Job) GetLabels

func (m *Job) GetLabels() map[string]string

func (*Job) GetPigJob

func (m *Job) GetPigJob() *PigJob

func (*Job) GetPlacement

func (m *Job) GetPlacement() *JobPlacement

func (*Job) GetPysparkJob

func (m *Job) GetPysparkJob() *PySparkJob

func (*Job) GetReference

func (m *Job) GetReference() *JobReference

func (*Job) GetScheduling

func (m *Job) GetScheduling() *JobScheduling

func (*Job) GetSparkJob

func (m *Job) GetSparkJob() *SparkJob

func (*Job) GetSparkSqlJob

func (m *Job) GetSparkSqlJob() *SparkSqlJob

func (*Job) GetStatus

func (m *Job) GetStatus() *JobStatus

func (*Job) GetStatusHistory

func (m *Job) GetStatusHistory() []*JobStatus

func (*Job) GetTypeJob

func (m *Job) GetTypeJob() isJob_TypeJob

func (*Job) GetYarnApplications

func (m *Job) GetYarnApplications() []*YarnApplication

func (*Job) ProtoMessage

func (*Job) ProtoMessage()

func (*Job) Reset

func (m *Job) Reset()

func (*Job) String

func (m *Job) String() string

func (*Job) XXX_DiscardUnknown

func (m *Job) XXX_DiscardUnknown()

func (*Job) XXX_Marshal

func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Job) XXX_Merge

func (m *Job) XXX_Merge(src proto.Message)

func (*Job) XXX_OneofFuncs

func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*Job) XXX_Size

func (m *Job) XXX_Size() int

func (*Job) XXX_Unmarshal

func (m *Job) XXX_Unmarshal(b []byte) error

type JobControllerClient

type JobControllerClient interface {
	// Submits a job to a cluster.
	SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Gets the resource representation for a job in a project.
	GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
	// [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
	CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}

JobControllerClient is the client API for JobController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewJobControllerClient

func NewJobControllerClient(cc *grpc.ClientConn) JobControllerClient

type JobControllerServer

type JobControllerServer interface {
	// Submits a job to a cluster.
	SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
	// Gets the resource representation for a job in a project.
	GetJob(context.Context, *GetJobRequest) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(context.Context, *UpdateJobRequest) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
	// [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
	CancelJob(context.Context, *CancelJobRequest) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(context.Context, *DeleteJobRequest) (*empty.Empty, error)
}

JobControllerServer is the server API for JobController service.

type JobPlacement

type JobPlacement struct {
	// Required. The name of the cluster where the job will be submitted.
	ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Output only. A cluster UUID generated by the Cloud Dataproc service when
	// the job is submitted.
	ClusterUuid          string   `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Cloud Dataproc job config.

func (*JobPlacement) Descriptor

func (*JobPlacement) Descriptor() ([]byte, []int)

func (*JobPlacement) GetClusterName

func (m *JobPlacement) GetClusterName() string

func (*JobPlacement) GetClusterUuid

func (m *JobPlacement) GetClusterUuid() string

func (*JobPlacement) ProtoMessage

func (*JobPlacement) ProtoMessage()

func (*JobPlacement) Reset

func (m *JobPlacement) Reset()

func (*JobPlacement) String

func (m *JobPlacement) String() string

func (*JobPlacement) XXX_DiscardUnknown

func (m *JobPlacement) XXX_DiscardUnknown()

func (*JobPlacement) XXX_Marshal

func (m *JobPlacement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobPlacement) XXX_Merge

func (m *JobPlacement) XXX_Merge(src proto.Message)

func (*JobPlacement) XXX_Size

func (m *JobPlacement) XXX_Size() int

func (*JobPlacement) XXX_Unmarshal

func (m *JobPlacement) XXX_Unmarshal(b []byte) error

type JobReference

type JobReference struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Optional. The job ID, which must be unique within the project. The job ID
	// is generated by the server upon job submission or provided by the user as a
	// means to perform retries without creating duplicate jobs. The ID must
	// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
	// hyphens (-). The maximum length is 100 characters.
	JobId                string   `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Encapsulates the full scoping used to reference a job.

func (*JobReference) Descriptor

func (*JobReference) Descriptor() ([]byte, []int)

func (*JobReference) GetJobId

func (m *JobReference) GetJobId() string

func (*JobReference) GetProjectId

func (m *JobReference) GetProjectId() string

func (*JobReference) ProtoMessage

func (*JobReference) ProtoMessage()

func (*JobReference) Reset

func (m *JobReference) Reset()

func (*JobReference) String

func (m *JobReference) String() string

func (*JobReference) XXX_DiscardUnknown

func (m *JobReference) XXX_DiscardUnknown()

func (*JobReference) XXX_Marshal

func (m *JobReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobReference) XXX_Merge

func (m *JobReference) XXX_Merge(src proto.Message)

func (*JobReference) XXX_Size

func (m *JobReference) XXX_Size() int

func (*JobReference) XXX_Unmarshal

func (m *JobReference) XXX_Unmarshal(b []byte) error

type JobScheduling

type JobScheduling struct {
	// Optional. Maximum number of times per hour a driver may be restarted as
	// a result of the driver terminating with a non-zero code before the job
	// is reported failed.
	//
	// A job may be reported as thrashing if the driver exits with a non-zero
	// code 4 times within a 10-minute window.
	//
	// Maximum value is 10.
	MaxFailuresPerHour   int32    `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour,proto3" json:"max_failures_per_hour,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Job scheduling options.

func (*JobScheduling) Descriptor

func (*JobScheduling) Descriptor() ([]byte, []int)

func (*JobScheduling) GetMaxFailuresPerHour

func (m *JobScheduling) GetMaxFailuresPerHour() int32

func (*JobScheduling) ProtoMessage

func (*JobScheduling) ProtoMessage()

func (*JobScheduling) Reset

func (m *JobScheduling) Reset()

func (*JobScheduling) String

func (m *JobScheduling) String() string

func (*JobScheduling) XXX_DiscardUnknown

func (m *JobScheduling) XXX_DiscardUnknown()

func (*JobScheduling) XXX_Marshal

func (m *JobScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobScheduling) XXX_Merge

func (m *JobScheduling) XXX_Merge(src proto.Message)

func (*JobScheduling) XXX_Size

func (m *JobScheduling) XXX_Size() int

func (*JobScheduling) XXX_Unmarshal

func (m *JobScheduling) XXX_Unmarshal(b []byte) error

type JobStatus

type JobStatus struct {
	// Output only. A state message specifying the overall job state.
	State JobStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.JobStatus_State" json:"state,omitempty"`
	// Output only. Optional job state details, such as an error
	// description if the state is <code>ERROR</code>.
	Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
	// Output only. The time when this state was entered.
	StateStartTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information, which includes
	// status reported by the agent.
	Substate             JobStatus_Substate `protobuf:"varint,7,opt,name=substate,proto3,enum=google.cloud.dataproc.v1beta2.JobStatus_Substate" json:"substate,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

Cloud Dataproc job status.

func (*JobStatus) Descriptor

func (*JobStatus) Descriptor() ([]byte, []int)

func (*JobStatus) GetDetails

func (m *JobStatus) GetDetails() string

func (*JobStatus) GetState

func (m *JobStatus) GetState() JobStatus_State

func (*JobStatus) GetStateStartTime

func (m *JobStatus) GetStateStartTime() *timestamp.Timestamp

func (*JobStatus) GetSubstate

func (m *JobStatus) GetSubstate() JobStatus_Substate

func (*JobStatus) ProtoMessage

func (*JobStatus) ProtoMessage()

func (*JobStatus) Reset

func (m *JobStatus) Reset()

func (*JobStatus) String

func (m *JobStatus) String() string

func (*JobStatus) XXX_DiscardUnknown

func (m *JobStatus) XXX_DiscardUnknown()

func (*JobStatus) XXX_Marshal

func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobStatus) XXX_Merge

func (m *JobStatus) XXX_Merge(src proto.Message)

func (*JobStatus) XXX_Size

func (m *JobStatus) XXX_Size() int

func (*JobStatus) XXX_Unmarshal

func (m *JobStatus) XXX_Unmarshal(b []byte) error

type JobStatus_State

type JobStatus_State int32

The job state.

const (
	// The job state is unknown.
	JobStatus_STATE_UNSPECIFIED JobStatus_State = 0
	// The job is pending; it has been submitted, but is not yet running.
	JobStatus_PENDING JobStatus_State = 1
	// Job has been received by the service and completed initial setup;
	// it will soon be submitted to the cluster.
	JobStatus_SETUP_DONE JobStatus_State = 8
	// The job is running on the cluster.
	JobStatus_RUNNING JobStatus_State = 2
	// A CancelJob request has been received, but is pending.
	JobStatus_CANCEL_PENDING JobStatus_State = 3
	// Transient in-flight resources have been canceled, and the request to
	// cancel the running job has been issued to the cluster.
	JobStatus_CANCEL_STARTED JobStatus_State = 7
	// The job cancellation was successful.
	JobStatus_CANCELLED JobStatus_State = 4
	// The job has completed successfully.
	JobStatus_DONE JobStatus_State = 5
	// The job has completed, but encountered an error.
	JobStatus_ERROR JobStatus_State = 6
	// Job attempt has failed. The detail field contains failure details for
	// this attempt.
	//
	// Applies to restartable jobs only.
	JobStatus_ATTEMPT_FAILURE JobStatus_State = 9
)

func (JobStatus_State) EnumDescriptor

func (JobStatus_State) EnumDescriptor() ([]byte, []int)

func (JobStatus_State) String

func (x JobStatus_State) String() string

type JobStatus_Substate

type JobStatus_Substate int32

The job substate.

const (
	// The job substate is unknown.
	JobStatus_UNSPECIFIED JobStatus_Substate = 0
	// The Job is submitted to the agent.
	//
	// Applies to RUNNING state.
	JobStatus_SUBMITTED JobStatus_Substate = 1
	// The Job has been received and is awaiting execution (it may be waiting
	// for a condition to be met). See the "details" field for the reason for
	// the delay.
	//
	// Applies to RUNNING state.
	JobStatus_QUEUED JobStatus_Substate = 2
	// The agent-reported status is out of date, which may be caused by a
	// loss of communication between the agent and Cloud Dataproc. If the
	// agent does not send a timely update, the job will fail.
	//
	// Applies to RUNNING state.
	JobStatus_STALE_STATUS JobStatus_Substate = 3
)

func (JobStatus_Substate) EnumDescriptor

func (JobStatus_Substate) EnumDescriptor() ([]byte, []int)

func (JobStatus_Substate) String

func (x JobStatus_Substate) String() string

type Job_HadoopJob

type Job_HadoopJob struct {
	HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}

type Job_HiveJob

type Job_HiveJob struct {
	HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}

type Job_PigJob

type Job_PigJob struct {
	PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,proto3,oneof"`
}

type Job_PysparkJob

type Job_PysparkJob struct {
	PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}

type Job_SparkJob

type Job_SparkJob struct {
	SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}

type Job_SparkSqlJob

type Job_SparkSqlJob struct {
	SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}

type LifecycleConfig

type LifecycleConfig struct {
	// Optional. The longest duration that the cluster would stay alive while
	// idle; passing this threshold will cause the cluster to be auto-deleted.
	IdleDeleteTtl *duration.Duration `protobuf:"bytes,1,opt,name=idle_delete_ttl,json=idleDeleteTtl,proto3" json:"idle_delete_ttl,omitempty"`
	// Optional. Either the exact time the cluster should be deleted at or
	// the cluster maximum age.
	//
	// Types that are valid to be assigned to Ttl:
	//	*LifecycleConfig_AutoDeleteTime
	//	*LifecycleConfig_AutoDeleteTtl
	Ttl                  isLifecycleConfig_Ttl `protobuf_oneof:"ttl"`
	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
	XXX_unrecognized     []byte                `json:"-"`
	XXX_sizecache        int32                 `json:"-"`
}

Specifies the schedule configuration for automatic cluster deletion.

func (*LifecycleConfig) Descriptor

func (*LifecycleConfig) Descriptor() ([]byte, []int)

func (*LifecycleConfig) GetAutoDeleteTime

func (m *LifecycleConfig) GetAutoDeleteTime() *timestamp.Timestamp

func (*LifecycleConfig) GetAutoDeleteTtl

func (m *LifecycleConfig) GetAutoDeleteTtl() *duration.Duration

func (*LifecycleConfig) GetIdleDeleteTtl

func (m *LifecycleConfig) GetIdleDeleteTtl() *duration.Duration

func (*LifecycleConfig) GetTtl

func (m *LifecycleConfig) GetTtl() isLifecycleConfig_Ttl

func (*LifecycleConfig) ProtoMessage

func (*LifecycleConfig) ProtoMessage()

func (*LifecycleConfig) Reset

func (m *LifecycleConfig) Reset()

func (*LifecycleConfig) String

func (m *LifecycleConfig) String() string

func (*LifecycleConfig) XXX_DiscardUnknown

func (m *LifecycleConfig) XXX_DiscardUnknown()

func (*LifecycleConfig) XXX_Marshal

func (m *LifecycleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LifecycleConfig) XXX_Merge

func (m *LifecycleConfig) XXX_Merge(src proto.Message)

func (*LifecycleConfig) XXX_OneofFuncs

func (*LifecycleConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*LifecycleConfig) XXX_Size

func (m *LifecycleConfig) XXX_Size() int

func (*LifecycleConfig) XXX_Unmarshal

func (m *LifecycleConfig) XXX_Unmarshal(b []byte) error

type LifecycleConfig_AutoDeleteTime

type LifecycleConfig_AutoDeleteTime struct {
	AutoDeleteTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=auto_delete_time,json=autoDeleteTime,proto3,oneof"`
}

type LifecycleConfig_AutoDeleteTtl

type LifecycleConfig_AutoDeleteTtl struct {
	AutoDeleteTtl *duration.Duration `protobuf:"bytes,3,opt,name=auto_delete_ttl,json=autoDeleteTtl,proto3,oneof"`
}

type ListClustersRequest

type ListClustersRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,4,opt,name=region,proto3" json:"region,omitempty"`
	// Optional. A filter constraining the clusters to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// field = value [AND [field = value]] ...
	//
	// where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
	// and `[KEY]` is a label key. **value** can be `*` to match all values.
	// `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
	// `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
	// contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
	// contains the `DELETING` and `ERROR` states.
	// `clusterName` is the name of the cluster provided at creation time.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND clusterName = mycluster
	// AND labels.env = staging AND labels.starred = *
	Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
	// Optional. The standard List page size.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The standard List page token.
	PageToken            string   `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to list the clusters in a project.

func (*ListClustersRequest) Descriptor

func (*ListClustersRequest) Descriptor() ([]byte, []int)

func (*ListClustersRequest) GetFilter

func (m *ListClustersRequest) GetFilter() string

func (*ListClustersRequest) GetPageSize

func (m *ListClustersRequest) GetPageSize() int32

func (*ListClustersRequest) GetPageToken

func (m *ListClustersRequest) GetPageToken() string

func (*ListClustersRequest) GetProjectId

func (m *ListClustersRequest) GetProjectId() string

func (*ListClustersRequest) GetRegion

func (m *ListClustersRequest) GetRegion() string

func (*ListClustersRequest) ProtoMessage

func (*ListClustersRequest) ProtoMessage()

func (*ListClustersRequest) Reset

func (m *ListClustersRequest) Reset()

func (*ListClustersRequest) String

func (m *ListClustersRequest) String() string

func (*ListClustersRequest) XXX_DiscardUnknown

func (m *ListClustersRequest) XXX_DiscardUnknown()

func (*ListClustersRequest) XXX_Marshal

func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListClustersRequest) XXX_Merge

func (m *ListClustersRequest) XXX_Merge(src proto.Message)

func (*ListClustersRequest) XXX_Size

func (m *ListClustersRequest) XXX_Size() int

func (*ListClustersRequest) XXX_Unmarshal

func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error

type ListClustersResponse

type ListClustersResponse struct {
	// Output only. The clusters in the project.
	Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"`
	// Output only. This token is included in the response if there are more
	// results to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent <code>ListClustersRequest</code>.
	NextPageToken        string   `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The list of all clusters in a project.

func (*ListClustersResponse) Descriptor

func (*ListClustersResponse) Descriptor() ([]byte, []int)

func (*ListClustersResponse) GetClusters

func (m *ListClustersResponse) GetClusters() []*Cluster

func (*ListClustersResponse) GetNextPageToken

func (m *ListClustersResponse) GetNextPageToken() string

func (*ListClustersResponse) ProtoMessage

func (*ListClustersResponse) ProtoMessage()

func (*ListClustersResponse) Reset

func (m *ListClustersResponse) Reset()

func (*ListClustersResponse) String

func (m *ListClustersResponse) String() string

func (*ListClustersResponse) XXX_DiscardUnknown

func (m *ListClustersResponse) XXX_DiscardUnknown()

func (*ListClustersResponse) XXX_Marshal

func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListClustersResponse) XXX_Merge

func (m *ListClustersResponse) XXX_Merge(src proto.Message)

func (*ListClustersResponse) XXX_Size

func (m *ListClustersResponse) XXX_Size() int

func (*ListClustersResponse) XXX_Unmarshal

func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error

type ListJobsRequest

type ListJobsRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,6,opt,name=region,proto3" json:"region,omitempty"`
	// Optional. The number of results to return in each response.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// Optional. If set, the returned jobs list includes only jobs that were
	// submitted to the named cluster.
	ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifies enumerated categories of jobs to list.
	// (default = match ALL jobs).
	//
	// If `filter` is provided, `jobStateMatcher` will be ignored.
	JobStateMatcher ListJobsRequest_JobStateMatcher `` /* 176-byte string literal not displayed */
	// Optional. A filter constraining the jobs to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// [field = value] AND [field [= value]] ...
	//
	// where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
	// key. **value** can be `*` to match all values.
	// `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND labels.env = staging AND labels.starred = *
	Filter               string   `protobuf:"bytes,7,opt,name=filter,proto3" json:"filter,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to list jobs in a project.

func (*ListJobsRequest) Descriptor

func (*ListJobsRequest) Descriptor() ([]byte, []int)

func (*ListJobsRequest) GetClusterName

func (m *ListJobsRequest) GetClusterName() string

func (*ListJobsRequest) GetFilter

func (m *ListJobsRequest) GetFilter() string

func (*ListJobsRequest) GetJobStateMatcher

func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher

func (*ListJobsRequest) GetPageSize

func (m *ListJobsRequest) GetPageSize() int32

func (*ListJobsRequest) GetPageToken

func (m *ListJobsRequest) GetPageToken() string

func (*ListJobsRequest) GetProjectId

func (m *ListJobsRequest) GetProjectId() string

func (*ListJobsRequest) GetRegion

func (m *ListJobsRequest) GetRegion() string

func (*ListJobsRequest) ProtoMessage

func (*ListJobsRequest) ProtoMessage()

func (*ListJobsRequest) Reset

func (m *ListJobsRequest) Reset()

func (*ListJobsRequest) String

func (m *ListJobsRequest) String() string

func (*ListJobsRequest) XXX_DiscardUnknown

func (m *ListJobsRequest) XXX_DiscardUnknown()

func (*ListJobsRequest) XXX_Marshal

func (m *ListJobsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListJobsRequest) XXX_Merge

func (m *ListJobsRequest) XXX_Merge(src proto.Message)

func (*ListJobsRequest) XXX_Size

func (m *ListJobsRequest) XXX_Size() int

func (*ListJobsRequest) XXX_Unmarshal

func (m *ListJobsRequest) XXX_Unmarshal(b []byte) error

type ListJobsRequest_JobStateMatcher

type ListJobsRequest_JobStateMatcher int32

A matcher that specifies categories of job states.

const (
	// Match all jobs, regardless of state.
	ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0
	// Only match jobs in non-terminal states: PENDING, RUNNING, or
	// CANCEL_PENDING.
	ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1
	// Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
	ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2
)

func (ListJobsRequest_JobStateMatcher) EnumDescriptor

func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int)

func (ListJobsRequest_JobStateMatcher) String

type ListJobsResponse

type ListJobsResponse struct {
	// Output only. Jobs list.
	Jobs []*Job `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"`
	// Optional. This token is included in the response if there are more results
	// to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent <code>ListJobsRequest</code>.
	NextPageToken        string   `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A list of jobs in a project.

func (*ListJobsResponse) Descriptor

func (*ListJobsResponse) Descriptor() ([]byte, []int)

func (*ListJobsResponse) GetJobs

func (m *ListJobsResponse) GetJobs() []*Job

func (*ListJobsResponse) GetNextPageToken

func (m *ListJobsResponse) GetNextPageToken() string

func (*ListJobsResponse) ProtoMessage

func (*ListJobsResponse) ProtoMessage()

func (*ListJobsResponse) Reset

func (m *ListJobsResponse) Reset()

func (*ListJobsResponse) String

func (m *ListJobsResponse) String() string

func (*ListJobsResponse) XXX_DiscardUnknown

func (m *ListJobsResponse) XXX_DiscardUnknown()

func (*ListJobsResponse) XXX_Marshal

func (m *ListJobsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListJobsResponse) XXX_Merge

func (m *ListJobsResponse) XXX_Merge(src proto.Message)

func (*ListJobsResponse) XXX_Size

func (m *ListJobsResponse) XXX_Size() int

func (*ListJobsResponse) XXX_Unmarshal

func (m *ListJobsResponse) XXX_Unmarshal(b []byte) error

type ListWorkflowTemplatesRequest

type ListWorkflowTemplatesRequest struct {
	// Required. The "resource name" of the region, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Optional. The maximum number of results to return in each response.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken            string   `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to list workflow templates in a project.

func (*ListWorkflowTemplatesRequest) Descriptor

func (*ListWorkflowTemplatesRequest) Descriptor() ([]byte, []int)

func (*ListWorkflowTemplatesRequest) GetPageSize

func (m *ListWorkflowTemplatesRequest) GetPageSize() int32

func (*ListWorkflowTemplatesRequest) GetPageToken

func (m *ListWorkflowTemplatesRequest) GetPageToken() string

func (*ListWorkflowTemplatesRequest) GetParent

func (m *ListWorkflowTemplatesRequest) GetParent() string

func (*ListWorkflowTemplatesRequest) ProtoMessage

func (*ListWorkflowTemplatesRequest) ProtoMessage()

func (*ListWorkflowTemplatesRequest) Reset

func (m *ListWorkflowTemplatesRequest) Reset()

func (*ListWorkflowTemplatesRequest) String

func (*ListWorkflowTemplatesRequest) XXX_DiscardUnknown

func (m *ListWorkflowTemplatesRequest) XXX_DiscardUnknown()

func (*ListWorkflowTemplatesRequest) XXX_Marshal

func (m *ListWorkflowTemplatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListWorkflowTemplatesRequest) XXX_Merge

func (m *ListWorkflowTemplatesRequest) XXX_Merge(src proto.Message)

func (*ListWorkflowTemplatesRequest) XXX_Size

func (m *ListWorkflowTemplatesRequest) XXX_Size() int

func (*ListWorkflowTemplatesRequest) XXX_Unmarshal

func (m *ListWorkflowTemplatesRequest) XXX_Unmarshal(b []byte) error

type ListWorkflowTemplatesResponse

type ListWorkflowTemplatesResponse struct {
	// Output only. WorkflowTemplates list.
	Templates []*WorkflowTemplate `protobuf:"bytes,1,rep,name=templates,proto3" json:"templates,omitempty"`
	// Output only. This token is included in the response if there are more results
	// to fetch. To fetch additional results, provide this value as the
	// page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
	NextPageToken        string   `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A response to a request to list workflow templates in a project.

func (*ListWorkflowTemplatesResponse) Descriptor

func (*ListWorkflowTemplatesResponse) Descriptor() ([]byte, []int)

func (*ListWorkflowTemplatesResponse) GetNextPageToken

func (m *ListWorkflowTemplatesResponse) GetNextPageToken() string

func (*ListWorkflowTemplatesResponse) GetTemplates

func (m *ListWorkflowTemplatesResponse) GetTemplates() []*WorkflowTemplate

func (*ListWorkflowTemplatesResponse) ProtoMessage

func (*ListWorkflowTemplatesResponse) ProtoMessage()

func (*ListWorkflowTemplatesResponse) Reset

func (m *ListWorkflowTemplatesResponse) Reset()

func (*ListWorkflowTemplatesResponse) String

func (*ListWorkflowTemplatesResponse) XXX_DiscardUnknown

func (m *ListWorkflowTemplatesResponse) XXX_DiscardUnknown()

func (*ListWorkflowTemplatesResponse) XXX_Marshal

func (m *ListWorkflowTemplatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListWorkflowTemplatesResponse) XXX_Merge

func (m *ListWorkflowTemplatesResponse) XXX_Merge(src proto.Message)

func (*ListWorkflowTemplatesResponse) XXX_Size

func (m *ListWorkflowTemplatesResponse) XXX_Size() int

func (*ListWorkflowTemplatesResponse) XXX_Unmarshal

func (m *ListWorkflowTemplatesResponse) XXX_Unmarshal(b []byte) error

type LoggingConfig

type LoggingConfig struct {
	// The per-package log levels for the driver. This may include
	// "root" package name to configure rootLogger.
	// Examples:
	//   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels      map[string]LoggingConfig_Level `` /* 252-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
	XXX_unrecognized     []byte                         `json:"-"`
	XXX_sizecache        int32                          `json:"-"`
}

The runtime logging config of the job.

func (*LoggingConfig) Descriptor

func (*LoggingConfig) Descriptor() ([]byte, []int)

func (*LoggingConfig) GetDriverLogLevels

func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level

func (*LoggingConfig) ProtoMessage

func (*LoggingConfig) ProtoMessage()

func (*LoggingConfig) Reset

func (m *LoggingConfig) Reset()

func (*LoggingConfig) String

func (m *LoggingConfig) String() string

func (*LoggingConfig) XXX_DiscardUnknown

func (m *LoggingConfig) XXX_DiscardUnknown()

func (*LoggingConfig) XXX_Marshal

func (m *LoggingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LoggingConfig) XXX_Merge

func (m *LoggingConfig) XXX_Merge(src proto.Message)

func (*LoggingConfig) XXX_Size

func (m *LoggingConfig) XXX_Size() int

func (*LoggingConfig) XXX_Unmarshal

func (m *LoggingConfig) XXX_Unmarshal(b []byte) error

type LoggingConfig_Level

type LoggingConfig_Level int32

The Log4j level for job execution. When running an [Apache Hive](http://hive.apache.org/) job, Cloud Dataproc configures the Hive client to an equivalent verbosity level.

const (
	// Level is unspecified. Use default level for log4j.
	LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0
	// Use ALL level for log4j.
	LoggingConfig_ALL LoggingConfig_Level = 1
	// Use TRACE level for log4j.
	LoggingConfig_TRACE LoggingConfig_Level = 2
	// Use DEBUG level for log4j.
	LoggingConfig_DEBUG LoggingConfig_Level = 3
	// Use INFO level for log4j.
	LoggingConfig_INFO LoggingConfig_Level = 4
	// Use WARN level for log4j.
	LoggingConfig_WARN LoggingConfig_Level = 5
	// Use ERROR level for log4j.
	LoggingConfig_ERROR LoggingConfig_Level = 6
	// Use FATAL level for log4j.
	LoggingConfig_FATAL LoggingConfig_Level = 7
	// Turn off log4j.
	LoggingConfig_OFF LoggingConfig_Level = 8
)

func (LoggingConfig_Level) EnumDescriptor

func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int)

func (LoggingConfig_Level) String

func (x LoggingConfig_Level) String() string

type ManagedCluster

type ManagedCluster struct {
	// Required. The cluster name prefix. A unique cluster name will be formed by
	// appending a random suffix.
	//
	// The name must contain only lower-case letters (a-z), numbers (0-9),
	// and hyphens (-). Must begin with a letter. Cannot begin or end with
	// hyphen. Must consist of between 2 and 35 characters.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The cluster configuration.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	//
	// Label keys must be between 1 and 63 characters long, and must conform to
	// the following PCRE regular expression:
	// [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
	//
	// Label values must be between 1 and 63 characters long, and must conform to
	// the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
	//
	// No more than 32 labels can be associated with a given cluster.
	Labels               map[string]string `` /* 153-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

Cluster that is managed by the workflow.

func (*ManagedCluster) Descriptor

func (*ManagedCluster) Descriptor() ([]byte, []int)

func (*ManagedCluster) GetClusterName

func (m *ManagedCluster) GetClusterName() string

func (*ManagedCluster) GetConfig

func (m *ManagedCluster) GetConfig() *ClusterConfig

func (*ManagedCluster) GetLabels

func (m *ManagedCluster) GetLabels() map[string]string

func (*ManagedCluster) ProtoMessage

func (*ManagedCluster) ProtoMessage()

func (*ManagedCluster) Reset

func (m *ManagedCluster) Reset()

func (*ManagedCluster) String

func (m *ManagedCluster) String() string

func (*ManagedCluster) XXX_DiscardUnknown

func (m *ManagedCluster) XXX_DiscardUnknown()

func (*ManagedCluster) XXX_Marshal

func (m *ManagedCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ManagedCluster) XXX_Merge

func (m *ManagedCluster) XXX_Merge(src proto.Message)

func (*ManagedCluster) XXX_Size

func (m *ManagedCluster) XXX_Size() int

func (*ManagedCluster) XXX_Unmarshal

func (m *ManagedCluster) XXX_Unmarshal(b []byte) error

type ManagedGroupConfig

type ManagedGroupConfig struct {
	// Output only. The name of the Instance Template used for the Managed
	// Instance Group.
	InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName,proto3" json:"instance_template_name,omitempty"`
	// Output only. The name of the Instance Group Manager for this group.
	InstanceGroupManagerName string   `` /* 137-byte string literal not displayed */
	XXX_NoUnkeyedLiteral     struct{} `json:"-"`
	XXX_unrecognized         []byte   `json:"-"`
	XXX_sizecache            int32    `json:"-"`
}

Specifies the resources used to actively manage an instance group.

func (*ManagedGroupConfig) Descriptor

func (*ManagedGroupConfig) Descriptor() ([]byte, []int)

func (*ManagedGroupConfig) GetInstanceGroupManagerName

func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string

func (*ManagedGroupConfig) GetInstanceTemplateName

func (m *ManagedGroupConfig) GetInstanceTemplateName() string

func (*ManagedGroupConfig) ProtoMessage

func (*ManagedGroupConfig) ProtoMessage()

func (*ManagedGroupConfig) Reset

func (m *ManagedGroupConfig) Reset()

func (*ManagedGroupConfig) String

func (m *ManagedGroupConfig) String() string

func (*ManagedGroupConfig) XXX_DiscardUnknown

func (m *ManagedGroupConfig) XXX_DiscardUnknown()

func (*ManagedGroupConfig) XXX_Marshal

func (m *ManagedGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ManagedGroupConfig) XXX_Merge

func (m *ManagedGroupConfig) XXX_Merge(src proto.Message)

func (*ManagedGroupConfig) XXX_Size

func (m *ManagedGroupConfig) XXX_Size() int

func (*ManagedGroupConfig) XXX_Unmarshal

func (m *ManagedGroupConfig) XXX_Unmarshal(b []byte) error

type NodeInitializationAction

type NodeInitializationAction struct {
	// Required. Cloud Storage URI of executable file.
	ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile,proto3" json:"executable_file,omitempty"`
	// Optional. Amount of time the executable has to complete. The default is
	// 10 minutes. Cluster creation fails with an explanatory error message (the
	// name of the executable that caused the error and the exceeded timeout
	// period) if the executable has not completed by the end of the timeout
	// period.
	ExecutionTimeout     *duration.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout,proto3" json:"execution_timeout,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

Specifies an executable to run on a fully configured node and a timeout period for executable completion.

func (*NodeInitializationAction) Descriptor

func (*NodeInitializationAction) Descriptor() ([]byte, []int)

func (*NodeInitializationAction) GetExecutableFile

func (m *NodeInitializationAction) GetExecutableFile() string

func (*NodeInitializationAction) GetExecutionTimeout

func (m *NodeInitializationAction) GetExecutionTimeout() *duration.Duration

func (*NodeInitializationAction) ProtoMessage

func (*NodeInitializationAction) ProtoMessage()

func (*NodeInitializationAction) Reset

func (m *NodeInitializationAction) Reset()

func (*NodeInitializationAction) String

func (m *NodeInitializationAction) String() string

func (*NodeInitializationAction) XXX_DiscardUnknown

func (m *NodeInitializationAction) XXX_DiscardUnknown()

func (*NodeInitializationAction) XXX_Marshal

func (m *NodeInitializationAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NodeInitializationAction) XXX_Merge

func (m *NodeInitializationAction) XXX_Merge(src proto.Message)

func (*NodeInitializationAction) XXX_Size

func (m *NodeInitializationAction) XXX_Size() int

func (*NodeInitializationAction) XXX_Unmarshal

func (m *NodeInitializationAction) XXX_Unmarshal(b []byte) error

type OrderedJob

type OrderedJob struct {
	// Required. The step id. The id must be unique among all jobs
	// within the template.
	//
	// The step id is used as a prefix for the job id, as the job
	// `goog-dataproc-workflow-step-id` label, and in the
	// [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other
	// steps.
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). Cannot begin or end with underscore
	// or hyphen. Must consist of between 3 and 50 characters.
	StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"`
	// Required. The job definition.
	//
	// Types that are valid to be assigned to JobType:
	//	*OrderedJob_HadoopJob
	//	*OrderedJob_SparkJob
	//	*OrderedJob_PysparkJob
	//	*OrderedJob_HiveJob
	//	*OrderedJob_PigJob
	//	*OrderedJob_SparkSqlJob
	JobType isOrderedJob_JobType `protobuf_oneof:"job_type"`
	// Optional. The labels to associate with this job.
	//
	// Label keys must be between 1 and 63 characters long, and must conform to
	// the following regular expression:
	// [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
	//
	// Label values must be between 1 and 63 characters long, and must conform to
	// the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
	//
	// No more than 32 labels can be associated with a given job.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// Optional. Job scheduling configuration.
	Scheduling *JobScheduling `protobuf:"bytes,9,opt,name=scheduling,proto3" json:"scheduling,omitempty"`
	// Optional. The optional list of prerequisite job step_ids.
	// If not specified, the job will start at the beginning of workflow.
	PrerequisiteStepIds  []string `protobuf:"bytes,10,rep,name=prerequisite_step_ids,json=prerequisiteStepIds,proto3" json:"prerequisite_step_ids,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A job executed by the workflow.

func (*OrderedJob) Descriptor

func (*OrderedJob) Descriptor() ([]byte, []int)

func (*OrderedJob) GetHadoopJob

func (m *OrderedJob) GetHadoopJob() *HadoopJob

func (*OrderedJob) GetHiveJob

func (m *OrderedJob) GetHiveJob() *HiveJob

func (*OrderedJob) GetJobType

func (m *OrderedJob) GetJobType() isOrderedJob_JobType

func (*OrderedJob) GetLabels

func (m *OrderedJob) GetLabels() map[string]string

func (*OrderedJob) GetPigJob

func (m *OrderedJob) GetPigJob() *PigJob

func (*OrderedJob) GetPrerequisiteStepIds

func (m *OrderedJob) GetPrerequisiteStepIds() []string

func (*OrderedJob) GetPysparkJob

func (m *OrderedJob) GetPysparkJob() *PySparkJob

func (*OrderedJob) GetScheduling

func (m *OrderedJob) GetScheduling() *JobScheduling

func (*OrderedJob) GetSparkJob

func (m *OrderedJob) GetSparkJob() *SparkJob

func (*OrderedJob) GetSparkSqlJob

func (m *OrderedJob) GetSparkSqlJob() *SparkSqlJob

func (*OrderedJob) GetStepId

func (m *OrderedJob) GetStepId() string

func (*OrderedJob) ProtoMessage

func (*OrderedJob) ProtoMessage()

func (*OrderedJob) Reset

func (m *OrderedJob) Reset()

func (*OrderedJob) String

func (m *OrderedJob) String() string

func (*OrderedJob) XXX_DiscardUnknown

func (m *OrderedJob) XXX_DiscardUnknown()

func (*OrderedJob) XXX_Marshal

func (m *OrderedJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OrderedJob) XXX_Merge

func (m *OrderedJob) XXX_Merge(src proto.Message)

func (*OrderedJob) XXX_OneofFuncs

func (*OrderedJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*OrderedJob) XXX_Size

func (m *OrderedJob) XXX_Size() int

func (*OrderedJob) XXX_Unmarshal

func (m *OrderedJob) XXX_Unmarshal(b []byte) error

type OrderedJob_HadoopJob

type OrderedJob_HadoopJob struct {
	HadoopJob *HadoopJob `protobuf:"bytes,2,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}

type OrderedJob_HiveJob

type OrderedJob_HiveJob struct {
	HiveJob *HiveJob `protobuf:"bytes,5,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}

type OrderedJob_PigJob

type OrderedJob_PigJob struct {
	PigJob *PigJob `protobuf:"bytes,6,opt,name=pig_job,json=pigJob,proto3,oneof"`
}

type OrderedJob_PysparkJob

type OrderedJob_PysparkJob struct {
	PysparkJob *PySparkJob `protobuf:"bytes,4,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}

type OrderedJob_SparkJob

type OrderedJob_SparkJob struct {
	SparkJob *SparkJob `protobuf:"bytes,3,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}

type OrderedJob_SparkSqlJob

type OrderedJob_SparkSqlJob struct {
	SparkSqlJob *SparkSqlJob `protobuf:"bytes,7,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}

type PigJob

type PigJob struct {
	// Required. The sequence of Pig queries to execute, specified as an HCFS
	// file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*PigJob_QueryFileUri
	//	*PigJob_QueryList
	Queries isPigJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when executing
	// independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the Pig
	// command: `name=[value]`).
	ScriptVariables map[string]string `` /* 194-byte string literal not displayed */
	// Optional. A mapping of property names to values, used to configure Pig.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/pig/conf/pig.properties, and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of
	// the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN.

func (*PigJob) Descriptor

func (*PigJob) Descriptor() ([]byte, []int)

func (*PigJob) GetContinueOnFailure

func (m *PigJob) GetContinueOnFailure() bool

func (*PigJob) GetJarFileUris

func (m *PigJob) GetJarFileUris() []string

func (*PigJob) GetLoggingConfig

func (m *PigJob) GetLoggingConfig() *LoggingConfig

func (*PigJob) GetProperties

func (m *PigJob) GetProperties() map[string]string

func (*PigJob) GetQueries

func (m *PigJob) GetQueries() isPigJob_Queries

func (*PigJob) GetQueryFileUri

func (m *PigJob) GetQueryFileUri() string

func (*PigJob) GetQueryList

func (m *PigJob) GetQueryList() *QueryList

func (*PigJob) GetScriptVariables

func (m *PigJob) GetScriptVariables() map[string]string

func (*PigJob) ProtoMessage

func (*PigJob) ProtoMessage()

func (*PigJob) Reset

func (m *PigJob) Reset()

func (*PigJob) String

func (m *PigJob) String() string

func (*PigJob) XXX_DiscardUnknown

func (m *PigJob) XXX_DiscardUnknown()

func (*PigJob) XXX_Marshal

func (m *PigJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PigJob) XXX_Merge

func (m *PigJob) XXX_Merge(src proto.Message)

func (*PigJob) XXX_OneofFuncs

func (*PigJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*PigJob) XXX_Size

func (m *PigJob) XXX_Size() int

func (*PigJob) XXX_Unmarshal

func (m *PigJob) XXX_Unmarshal(b []byte) error

type PigJob_QueryFileUri

type PigJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

type PigJob_QueryList

type PigJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

type PySparkJob

type PySparkJob struct {
	// Required. The HCFS URI of the main Python file to use as the driver. Must
	// be a .py file.
	MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri,proto3" json:"main_python_file_uri,omitempty"`
	// Optional. The arguments to pass to the driver.  Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark
	// framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris,proto3" json:"python_file_uris,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Python driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Python drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory
	// of Python drivers and distributed tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure PySpark.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.

func (*PySparkJob) Descriptor

func (*PySparkJob) Descriptor() ([]byte, []int)

func (*PySparkJob) GetArchiveUris

func (m *PySparkJob) GetArchiveUris() []string

func (*PySparkJob) GetArgs

func (m *PySparkJob) GetArgs() []string

func (*PySparkJob) GetFileUris

func (m *PySparkJob) GetFileUris() []string

func (*PySparkJob) GetJarFileUris

func (m *PySparkJob) GetJarFileUris() []string

func (*PySparkJob) GetLoggingConfig

func (m *PySparkJob) GetLoggingConfig() *LoggingConfig

func (*PySparkJob) GetMainPythonFileUri

func (m *PySparkJob) GetMainPythonFileUri() string

func (*PySparkJob) GetProperties

func (m *PySparkJob) GetProperties() map[string]string

func (*PySparkJob) GetPythonFileUris

func (m *PySparkJob) GetPythonFileUris() []string

func (*PySparkJob) ProtoMessage

func (*PySparkJob) ProtoMessage()

func (*PySparkJob) Reset

func (m *PySparkJob) Reset()

func (*PySparkJob) String

func (m *PySparkJob) String() string

func (*PySparkJob) XXX_DiscardUnknown

func (m *PySparkJob) XXX_DiscardUnknown()

func (*PySparkJob) XXX_Marshal

func (m *PySparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PySparkJob) XXX_Merge

func (m *PySparkJob) XXX_Merge(src proto.Message)

func (*PySparkJob) XXX_Size

func (m *PySparkJob) XXX_Size() int

func (*PySparkJob) XXX_Unmarshal

func (m *PySparkJob) XXX_Unmarshal(b []byte) error

type QueryList

type QueryList struct {
	// Required. The queries to execute. You do not need to terminate a query
	// with a semicolon. Multiple queries can be specified in one string
	// by separating each with a semicolon. Here is an example of a Cloud
	// Dataproc API snippet that uses a QueryList to specify a HiveJob:
	//
	//     "hiveJob": {
	//       "queryList": {
	//         "queries": [
	//           "query1",
	//           "query2",
	//           "query3;query4",
	//         ]
	//       }
	//     }
	Queries              []string `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A list of queries to run on a cluster.

func (*QueryList) Descriptor

func (*QueryList) Descriptor() ([]byte, []int)

func (*QueryList) GetQueries

func (m *QueryList) GetQueries() []string

func (*QueryList) ProtoMessage

func (*QueryList) ProtoMessage()

func (*QueryList) Reset

func (m *QueryList) Reset()

func (*QueryList) String

func (m *QueryList) String() string

func (*QueryList) XXX_DiscardUnknown

func (m *QueryList) XXX_DiscardUnknown()

func (*QueryList) XXX_Marshal

func (m *QueryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*QueryList) XXX_Merge

func (m *QueryList) XXX_Merge(src proto.Message)

func (*QueryList) XXX_Size

func (m *QueryList) XXX_Size() int

func (*QueryList) XXX_Unmarshal

func (m *QueryList) XXX_Unmarshal(b []byte) error

type SoftwareConfig

type SoftwareConfig struct {
	// Optional. The version of software inside the cluster. It must be one of the supported
	// [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
	// such as "1.2" (including a subminor version, such as "1.2.29"), or the
	// ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
	// If unspecified, it defaults to the latest version.
	ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion,proto3" json:"image_version,omitempty"`
	// Optional. The properties to set on daemon config files.
	//
	// Property keys are specified in `prefix:property` format, such as
	// `core:fs.defaultFS`. The following are supported prefixes
	// and their mappings:
	//
	// * capacity-scheduler: `capacity-scheduler.xml`
	// * core:   `core-site.xml`
	// * distcp: `distcp-default.xml`
	// * hdfs:   `hdfs-site.xml`
	// * hive:   `hive-site.xml`
	// * mapred: `mapred-site.xml`
	// * pig:    `pig.properties`
	// * spark:  `spark-defaults.conf`
	// * yarn:   `yarn-site.xml`
	//
	// For more information, see
	// [Cluster properties](/dataproc/docs/concepts/cluster-properties).
	Properties           map[string]string `` /* 161-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

Specifies the selection and config of software inside the cluster.

func (*SoftwareConfig) Descriptor

func (*SoftwareConfig) Descriptor() ([]byte, []int)

func (*SoftwareConfig) GetImageVersion

func (m *SoftwareConfig) GetImageVersion() string

func (*SoftwareConfig) GetProperties

func (m *SoftwareConfig) GetProperties() map[string]string

func (*SoftwareConfig) ProtoMessage

func (*SoftwareConfig) ProtoMessage()

func (*SoftwareConfig) Reset

func (m *SoftwareConfig) Reset()

func (*SoftwareConfig) String

func (m *SoftwareConfig) String() string

func (*SoftwareConfig) XXX_DiscardUnknown

func (m *SoftwareConfig) XXX_DiscardUnknown()

func (*SoftwareConfig) XXX_Marshal

func (m *SoftwareConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SoftwareConfig) XXX_Merge

func (m *SoftwareConfig) XXX_Merge(src proto.Message)

func (*SoftwareConfig) XXX_Size

func (m *SoftwareConfig) XXX_Size() int

func (*SoftwareConfig) XXX_Unmarshal

func (m *SoftwareConfig) XXX_Unmarshal(b []byte) error

type SparkJob

type SparkJob struct {
	// Required. The specification of the main method to call to drive the job.
	// Specify either the jar file that contains the main class or the main class
	// name. To pass both a main jar and a main class in that jar, add the jar to
	// `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
	//
	// Types that are valid to be assigned to Driver:
	//	*SparkJob_MainJarFileUri
	//	*SparkJob_MainClass
	Driver isSparkJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Spark driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Spark drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory
	// of Spark drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Spark.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN.

func (*SparkJob) Descriptor

func (*SparkJob) Descriptor() ([]byte, []int)

func (*SparkJob) GetArchiveUris

func (m *SparkJob) GetArchiveUris() []string

func (*SparkJob) GetArgs

func (m *SparkJob) GetArgs() []string

func (*SparkJob) GetDriver

func (m *SparkJob) GetDriver() isSparkJob_Driver

func (*SparkJob) GetFileUris

func (m *SparkJob) GetFileUris() []string

func (*SparkJob) GetJarFileUris

func (m *SparkJob) GetJarFileUris() []string

func (*SparkJob) GetLoggingConfig

func (m *SparkJob) GetLoggingConfig() *LoggingConfig

func (*SparkJob) GetMainClass

func (m *SparkJob) GetMainClass() string

func (*SparkJob) GetMainJarFileUri

func (m *SparkJob) GetMainJarFileUri() string

func (*SparkJob) GetProperties

func (m *SparkJob) GetProperties() map[string]string

func (*SparkJob) ProtoMessage

func (*SparkJob) ProtoMessage()

func (*SparkJob) Reset

func (m *SparkJob) Reset()

func (*SparkJob) String

func (m *SparkJob) String() string

func (*SparkJob) XXX_DiscardUnknown

func (m *SparkJob) XXX_DiscardUnknown()

func (*SparkJob) XXX_Marshal

func (m *SparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SparkJob) XXX_Merge

func (m *SparkJob) XXX_Merge(src proto.Message)

func (*SparkJob) XXX_OneofFuncs

func (*SparkJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*SparkJob) XXX_Size

func (m *SparkJob) XXX_Size() int

func (*SparkJob) XXX_Unmarshal

func (m *SparkJob) XXX_Unmarshal(b []byte) error

type SparkJob_MainClass

type SparkJob_MainClass struct {
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

type SparkJob_MainJarFileUri

type SparkJob_MainJarFileUri struct {
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

type SparkSqlJob

type SparkSqlJob struct {
	// Required. The sequence of Spark SQL queries to execute, specified as
	// either an HCFS file URI or as a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*SparkSqlJob_QueryFileUri
	//	*SparkSqlJob_QueryList
	Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Spark SQL command: SET `name="value";`).
	ScriptVariables map[string]string `` /* 194-byte string literal not displayed */
	// Optional. A mapping of property names to values, used to configure
	// Spark SQL's SparkConf. Properties that conflict with values set by the
	// Cloud Dataproc API may be overwritten.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries.

func (*SparkSqlJob) Descriptor

func (*SparkSqlJob) Descriptor() ([]byte, []int)

func (*SparkSqlJob) GetJarFileUris

func (m *SparkSqlJob) GetJarFileUris() []string

func (*SparkSqlJob) GetLoggingConfig

func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig

func (*SparkSqlJob) GetProperties

func (m *SparkSqlJob) GetProperties() map[string]string

func (*SparkSqlJob) GetQueries

func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries

func (*SparkSqlJob) GetQueryFileUri

func (m *SparkSqlJob) GetQueryFileUri() string

func (*SparkSqlJob) GetQueryList

func (m *SparkSqlJob) GetQueryList() *QueryList

func (*SparkSqlJob) GetScriptVariables

func (m *SparkSqlJob) GetScriptVariables() map[string]string

func (*SparkSqlJob) ProtoMessage

func (*SparkSqlJob) ProtoMessage()

func (*SparkSqlJob) Reset

func (m *SparkSqlJob) Reset()

func (*SparkSqlJob) String

func (m *SparkSqlJob) String() string

func (*SparkSqlJob) XXX_DiscardUnknown

func (m *SparkSqlJob) XXX_DiscardUnknown()

func (*SparkSqlJob) XXX_Marshal

func (m *SparkSqlJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SparkSqlJob) XXX_Merge

func (m *SparkSqlJob) XXX_Merge(src proto.Message)

func (*SparkSqlJob) XXX_OneofFuncs

func (*SparkSqlJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*SparkSqlJob) XXX_Size

func (m *SparkSqlJob) XXX_Size() int

func (*SparkSqlJob) XXX_Unmarshal

func (m *SparkSqlJob) XXX_Unmarshal(b []byte) error

type SparkSqlJob_QueryFileUri

type SparkSqlJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

type SparkSqlJob_QueryList

type SparkSqlJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

type SubmitJobRequest

type SubmitJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job resource.
	Job *Job `protobuf:"bytes,2,opt,name=job,proto3" json:"job,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
	// is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to submit a job.

func (*SubmitJobRequest) Descriptor

func (*SubmitJobRequest) Descriptor() ([]byte, []int)

func (*SubmitJobRequest) GetJob

func (m *SubmitJobRequest) GetJob() *Job

func (*SubmitJobRequest) GetProjectId

func (m *SubmitJobRequest) GetProjectId() string

func (*SubmitJobRequest) GetRegion

func (m *SubmitJobRequest) GetRegion() string

func (*SubmitJobRequest) GetRequestId

func (m *SubmitJobRequest) GetRequestId() string

func (*SubmitJobRequest) ProtoMessage

func (*SubmitJobRequest) ProtoMessage()

func (*SubmitJobRequest) Reset

func (m *SubmitJobRequest) Reset()

func (*SubmitJobRequest) String

func (m *SubmitJobRequest) String() string

func (*SubmitJobRequest) XXX_DiscardUnknown

func (m *SubmitJobRequest) XXX_DiscardUnknown()

func (*SubmitJobRequest) XXX_Marshal

func (m *SubmitJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SubmitJobRequest) XXX_Merge

func (m *SubmitJobRequest) XXX_Merge(src proto.Message)

func (*SubmitJobRequest) XXX_Size

func (m *SubmitJobRequest) XXX_Size() int

func (*SubmitJobRequest) XXX_Unmarshal

func (m *SubmitJobRequest) XXX_Unmarshal(b []byte) error

type UpdateClusterRequest

type UpdateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project the
	// cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,5,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The changes to the cluster.
	Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// Optional. Timeout for graceful YARN decommissioning. Graceful
	// decommissioning allows removing nodes from the cluster without
	// interrupting jobs in progress. Timeout specifies how long to wait for jobs
	// in progress to finish before forcefully removing nodes (and potentially
	// interrupting jobs). Default timeout is 0 (for forceful decommission), and
	// the maximum allowed timeout is 1 day.
	//
	// Only supported on Dataproc image versions 1.2 and higher.
	GracefulDecommissionTimeout *duration.Duration `` /* 144-byte string literal not displayed */
	// Required. Specifies the path, relative to `Cluster`, of
	// the field to update. For example, to change the number of workers
	// in a cluster to 5, the `update_mask` parameter would be
	// specified as `config.worker_config.num_instances`,
	// and the `PATCH` request body would specify the new value, as follows:
	//
	//     {
	//       "config":{
	//         "workerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	//
	// Similarly, to change the number of preemptible workers in a cluster to 5, the
	// `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
	// and the `PATCH` request body would be set as follows:
	//
	//     {
	//       "config":{
	//         "secondaryWorkerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	// <strong>Note:</strong> currently only the following fields can be updated:
	//
	// <table>
	// <tr>
	// <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
	// </tr>
	// <tr>
	// <td>labels</td><td>Updates labels</td>
	// </tr>
	// <tr>
	// <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
	// </tr>
	// <tr>
	// <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
	// </tr>
	// </table>
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
	// backend is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,7,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to update a cluster.

func (*UpdateClusterRequest) Descriptor

func (*UpdateClusterRequest) Descriptor() ([]byte, []int)

func (*UpdateClusterRequest) GetCluster

func (m *UpdateClusterRequest) GetCluster() *Cluster

func (*UpdateClusterRequest) GetClusterName

func (m *UpdateClusterRequest) GetClusterName() string

func (*UpdateClusterRequest) GetGracefulDecommissionTimeout

func (m *UpdateClusterRequest) GetGracefulDecommissionTimeout() *duration.Duration

func (*UpdateClusterRequest) GetProjectId

func (m *UpdateClusterRequest) GetProjectId() string

func (*UpdateClusterRequest) GetRegion

func (m *UpdateClusterRequest) GetRegion() string

func (*UpdateClusterRequest) GetRequestId

func (m *UpdateClusterRequest) GetRequestId() string

func (*UpdateClusterRequest) GetUpdateMask

func (m *UpdateClusterRequest) GetUpdateMask() *field_mask.FieldMask

func (*UpdateClusterRequest) ProtoMessage

func (*UpdateClusterRequest) ProtoMessage()

func (*UpdateClusterRequest) Reset

func (m *UpdateClusterRequest) Reset()

func (*UpdateClusterRequest) String

func (m *UpdateClusterRequest) String() string

func (*UpdateClusterRequest) XXX_DiscardUnknown

func (m *UpdateClusterRequest) XXX_DiscardUnknown()

func (*UpdateClusterRequest) XXX_Marshal

func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*UpdateClusterRequest) XXX_Merge

func (m *UpdateClusterRequest) XXX_Merge(src proto.Message)

func (*UpdateClusterRequest) XXX_Size

func (m *UpdateClusterRequest) XXX_Size() int

func (*UpdateClusterRequest) XXX_Unmarshal

func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error

type UpdateJobRequest

type UpdateJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Required. The changes to the job.
	Job *Job `protobuf:"bytes,4,opt,name=job,proto3" json:"job,omitempty"`
	// Required. Specifies the path, relative to <code>Job</code>, of
	// the field to update. For example, to update the labels of a Job the
	// <code>update_mask</code> parameter would be specified as
	// <code>labels</code>, and the `PATCH` request body would specify the new
	// value. <strong>Note:</strong> Currently, <code>labels</code> is the only
	// field that can be updated.
	UpdateMask           *field_mask.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
	XXX_unrecognized     []byte                `json:"-"`
	XXX_sizecache        int32                 `json:"-"`
}

A request to update a job.

func (*UpdateJobRequest) Descriptor

func (*UpdateJobRequest) Descriptor() ([]byte, []int)

func (*UpdateJobRequest) GetJob

func (m *UpdateJobRequest) GetJob() *Job

func (*UpdateJobRequest) GetJobId

func (m *UpdateJobRequest) GetJobId() string

func (*UpdateJobRequest) GetProjectId

func (m *UpdateJobRequest) GetProjectId() string

func (*UpdateJobRequest) GetRegion

func (m *UpdateJobRequest) GetRegion() string

func (*UpdateJobRequest) GetUpdateMask

func (m *UpdateJobRequest) GetUpdateMask() *field_mask.FieldMask

func (*UpdateJobRequest) ProtoMessage

func (*UpdateJobRequest) ProtoMessage()

func (*UpdateJobRequest) Reset

func (m *UpdateJobRequest) Reset()

func (*UpdateJobRequest) String

func (m *UpdateJobRequest) String() string

func (*UpdateJobRequest) XXX_DiscardUnknown

func (m *UpdateJobRequest) XXX_DiscardUnknown()

func (*UpdateJobRequest) XXX_Marshal

func (m *UpdateJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*UpdateJobRequest) XXX_Merge

func (m *UpdateJobRequest) XXX_Merge(src proto.Message)

func (*UpdateJobRequest) XXX_Size

func (m *UpdateJobRequest) XXX_Size() int

func (*UpdateJobRequest) XXX_Unmarshal

func (m *UpdateJobRequest) XXX_Unmarshal(b []byte) error

type UpdateWorkflowTemplateRequest

type UpdateWorkflowTemplateRequest struct {
	// Required. The updated workflow template.
	//
	// The `template.version` field must match the current version.
	Template             *WorkflowTemplate `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

A request to update a workflow template.

func (*UpdateWorkflowTemplateRequest) Descriptor

func (*UpdateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*UpdateWorkflowTemplateRequest) GetTemplate

func (m *UpdateWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate

func (*UpdateWorkflowTemplateRequest) ProtoMessage

func (*UpdateWorkflowTemplateRequest) ProtoMessage()

func (*UpdateWorkflowTemplateRequest) Reset

func (m *UpdateWorkflowTemplateRequest) Reset()

func (*UpdateWorkflowTemplateRequest) String

func (m *UpdateWorkflowTemplateRequest) String() string

func (*UpdateWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *UpdateWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*UpdateWorkflowTemplateRequest) XXX_Marshal

func (m *UpdateWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*UpdateWorkflowTemplateRequest) XXX_Merge

func (m *UpdateWorkflowTemplateRequest) XXX_Merge(src proto.Message)

func (*UpdateWorkflowTemplateRequest) XXX_Size

func (m *UpdateWorkflowTemplateRequest) XXX_Size() int

func (*UpdateWorkflowTemplateRequest) XXX_Unmarshal

func (m *UpdateWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type WorkflowGraph

type WorkflowGraph struct {
	// Output only. The workflow nodes.
	Nodes                []*WorkflowNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

The workflow graph.

func (*WorkflowGraph) Descriptor

func (*WorkflowGraph) Descriptor() ([]byte, []int)

func (*WorkflowGraph) GetNodes

func (m *WorkflowGraph) GetNodes() []*WorkflowNode

func (*WorkflowGraph) ProtoMessage

func (*WorkflowGraph) ProtoMessage()

func (*WorkflowGraph) Reset

func (m *WorkflowGraph) Reset()

func (*WorkflowGraph) String

func (m *WorkflowGraph) String() string

func (*WorkflowGraph) XXX_DiscardUnknown

func (m *WorkflowGraph) XXX_DiscardUnknown()

func (*WorkflowGraph) XXX_Marshal

func (m *WorkflowGraph) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowGraph) XXX_Merge

func (m *WorkflowGraph) XXX_Merge(src proto.Message)

func (*WorkflowGraph) XXX_Size

func (m *WorkflowGraph) XXX_Size() int

func (*WorkflowGraph) XXX_Unmarshal

func (m *WorkflowGraph) XXX_Unmarshal(b []byte) error

type WorkflowMetadata

type WorkflowMetadata struct {
	// Output only. The "resource name" of the template.
	Template string `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`
	// Output only. The version of template at the time of
	// workflow instantiation.
	Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// Output only. The create cluster operation metadata.
	CreateCluster *ClusterOperation `protobuf:"bytes,3,opt,name=create_cluster,json=createCluster,proto3" json:"create_cluster,omitempty"`
	// Output only. The workflow graph.
	Graph *WorkflowGraph `protobuf:"bytes,4,opt,name=graph,proto3" json:"graph,omitempty"`
	// Output only. The delete cluster operation metadata.
	DeleteCluster *ClusterOperation `protobuf:"bytes,5,opt,name=delete_cluster,json=deleteCluster,proto3" json:"delete_cluster,omitempty"`
	// Output only. The workflow state.
	State WorkflowMetadata_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.WorkflowMetadata_State" json:"state,omitempty"`
	// Output only. The name of the managed cluster.
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Map from parameter names to values that were used for those parameters.
	Parameters           map[string]string `` /* 161-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

A Cloud Dataproc workflow metadata resource: output-only information about a workflow instantiation (the source template and version, cluster operations, job graph, state, and parameters).

func (*WorkflowMetadata) Descriptor

func (*WorkflowMetadata) Descriptor() ([]byte, []int)

func (*WorkflowMetadata) GetClusterName

func (m *WorkflowMetadata) GetClusterName() string

func (*WorkflowMetadata) GetCreateCluster

func (m *WorkflowMetadata) GetCreateCluster() *ClusterOperation

func (*WorkflowMetadata) GetDeleteCluster

func (m *WorkflowMetadata) GetDeleteCluster() *ClusterOperation

func (*WorkflowMetadata) GetGraph

func (m *WorkflowMetadata) GetGraph() *WorkflowGraph

func (*WorkflowMetadata) GetParameters

func (m *WorkflowMetadata) GetParameters() map[string]string

func (*WorkflowMetadata) GetState

func (*WorkflowMetadata) GetTemplate

func (m *WorkflowMetadata) GetTemplate() string

func (*WorkflowMetadata) GetVersion

func (m *WorkflowMetadata) GetVersion() int32

func (*WorkflowMetadata) ProtoMessage

func (*WorkflowMetadata) ProtoMessage()

func (*WorkflowMetadata) Reset

func (m *WorkflowMetadata) Reset()

func (*WorkflowMetadata) String

func (m *WorkflowMetadata) String() string

func (*WorkflowMetadata) XXX_DiscardUnknown

func (m *WorkflowMetadata) XXX_DiscardUnknown()

func (*WorkflowMetadata) XXX_Marshal

func (m *WorkflowMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowMetadata) XXX_Merge

func (m *WorkflowMetadata) XXX_Merge(src proto.Message)

func (*WorkflowMetadata) XXX_Size

func (m *WorkflowMetadata) XXX_Size() int

func (*WorkflowMetadata) XXX_Unmarshal

func (m *WorkflowMetadata) XXX_Unmarshal(b []byte) error

type WorkflowMetadata_State

type WorkflowMetadata_State int32

The operation state.

const (
	// Unused.
	WorkflowMetadata_UNKNOWN WorkflowMetadata_State = 0
	// The operation has been created.
	WorkflowMetadata_PENDING WorkflowMetadata_State = 1
	// The operation is running.
	WorkflowMetadata_RUNNING WorkflowMetadata_State = 2
	// The operation is done; either cancelled or completed.
	WorkflowMetadata_DONE WorkflowMetadata_State = 3
)

func (WorkflowMetadata_State) EnumDescriptor

func (WorkflowMetadata_State) EnumDescriptor() ([]byte, []int)

func (WorkflowMetadata_State) String

func (x WorkflowMetadata_State) String() string

type WorkflowNode

type WorkflowNode struct {
	// Output only. The name of the node.
	StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"`
	// Output only. Node's prerequisite nodes.
	PrerequisiteStepIds []string `protobuf:"bytes,2,rep,name=prerequisite_step_ids,json=prerequisiteStepIds,proto3" json:"prerequisite_step_ids,omitempty"`
	// Output only. The job id; populated after the node enters RUNNING state.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Output only. The node state.
	State WorkflowNode_NodeState `protobuf:"varint,5,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.WorkflowNode_NodeState" json:"state,omitempty"`
	// Output only. The error detail.
	Error                string   `protobuf:"bytes,6,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The workflow node.

func (*WorkflowNode) Descriptor

func (*WorkflowNode) Descriptor() ([]byte, []int)

func (*WorkflowNode) GetError

func (m *WorkflowNode) GetError() string

func (*WorkflowNode) GetJobId

func (m *WorkflowNode) GetJobId() string

func (*WorkflowNode) GetPrerequisiteStepIds

func (m *WorkflowNode) GetPrerequisiteStepIds() []string

func (*WorkflowNode) GetState

func (m *WorkflowNode) GetState() WorkflowNode_NodeState

func (*WorkflowNode) GetStepId

func (m *WorkflowNode) GetStepId() string

func (*WorkflowNode) ProtoMessage

func (*WorkflowNode) ProtoMessage()

func (*WorkflowNode) Reset

func (m *WorkflowNode) Reset()

func (*WorkflowNode) String

func (m *WorkflowNode) String() string

func (*WorkflowNode) XXX_DiscardUnknown

func (m *WorkflowNode) XXX_DiscardUnknown()

func (*WorkflowNode) XXX_Marshal

func (m *WorkflowNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowNode) XXX_Merge

func (m *WorkflowNode) XXX_Merge(src proto.Message)

func (*WorkflowNode) XXX_Size

func (m *WorkflowNode) XXX_Size() int

func (*WorkflowNode) XXX_Unmarshal

func (m *WorkflowNode) XXX_Unmarshal(b []byte) error

type WorkflowNode_NodeState

type WorkflowNode_NodeState int32

The workflow node state.

const (
	// State is unspecified.
	WorkflowNode_NODE_STATUS_UNSPECIFIED WorkflowNode_NodeState = 0
	// The node is awaiting prerequisite node to finish.
	WorkflowNode_BLOCKED WorkflowNode_NodeState = 1
	// The node is runnable but not running.
	WorkflowNode_RUNNABLE WorkflowNode_NodeState = 2
	// The node is running.
	WorkflowNode_RUNNING WorkflowNode_NodeState = 3
	// The node completed successfully.
	WorkflowNode_COMPLETED WorkflowNode_NodeState = 4
	// The node failed. A node can be marked FAILED because
	// its ancestor or peer failed.
	WorkflowNode_FAILED WorkflowNode_NodeState = 5
)

func (WorkflowNode_NodeState) EnumDescriptor

func (WorkflowNode_NodeState) EnumDescriptor() ([]byte, []int)

func (WorkflowNode_NodeState) String

func (x WorkflowNode_NodeState) String() string

type WorkflowTemplate

type WorkflowTemplate struct {
	// Required. The template id.
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). Cannot begin or end with underscore
	// or hyphen. Must consist of between 3 and 50 characters.
	Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
	// Output only. The "resource name" of the template, as described
	// in https://cloud.google.com/apis/design/resource_names of the form
	// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. Used to perform a consistent read-modify-write.
	//
	// This field should be left blank for a `CreateWorkflowTemplate` request. It
	// is required for an `UpdateWorkflowTemplate` request, and must match the
	// current server version. A typical update template flow would fetch the
	// current template with a `GetWorkflowTemplate` request, which will return
	// the current template with the `version` field filled in with the
	// current server version. The user updates other fields in the template,
	// then returns it as part of the `UpdateWorkflowTemplate` request.
	Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`
	// Output only. The time template was created.
	CreateTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// Output only. The time template was last updated.
	UpdateTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
	// Optional. The labels to associate with this template. These labels
	// will be propagated to all jobs and clusters created by the workflow
	// instance.
	//
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	//
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	//
	// No more than 32 labels can be associated with a template.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// Required. WorkflowTemplate scheduling information.
	Placement *WorkflowTemplatePlacement `protobuf:"bytes,7,opt,name=placement,proto3" json:"placement,omitempty"`
	// Required. The Directed Acyclic Graph of Jobs to submit.
	Jobs                 []*OrderedJob `protobuf:"bytes,8,rep,name=jobs,proto3" json:"jobs,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

A Cloud Dataproc workflow template resource.

func (*WorkflowTemplate) Descriptor

func (*WorkflowTemplate) Descriptor() ([]byte, []int)

func (*WorkflowTemplate) GetCreateTime

func (m *WorkflowTemplate) GetCreateTime() *timestamp.Timestamp

func (*WorkflowTemplate) GetId

func (m *WorkflowTemplate) GetId() string

func (*WorkflowTemplate) GetJobs

func (m *WorkflowTemplate) GetJobs() []*OrderedJob

func (*WorkflowTemplate) GetLabels

func (m *WorkflowTemplate) GetLabels() map[string]string

func (*WorkflowTemplate) GetName

func (m *WorkflowTemplate) GetName() string

func (*WorkflowTemplate) GetPlacement

func (m *WorkflowTemplate) GetPlacement() *WorkflowTemplatePlacement

func (*WorkflowTemplate) GetUpdateTime

func (m *WorkflowTemplate) GetUpdateTime() *timestamp.Timestamp

func (*WorkflowTemplate) GetVersion

func (m *WorkflowTemplate) GetVersion() int32

func (*WorkflowTemplate) ProtoMessage

func (*WorkflowTemplate) ProtoMessage()

func (*WorkflowTemplate) Reset

func (m *WorkflowTemplate) Reset()

func (*WorkflowTemplate) String

func (m *WorkflowTemplate) String() string

func (*WorkflowTemplate) XXX_DiscardUnknown

func (m *WorkflowTemplate) XXX_DiscardUnknown()

func (*WorkflowTemplate) XXX_Marshal

func (m *WorkflowTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowTemplate) XXX_Merge

func (m *WorkflowTemplate) XXX_Merge(src proto.Message)

func (*WorkflowTemplate) XXX_Size

func (m *WorkflowTemplate) XXX_Size() int

func (*WorkflowTemplate) XXX_Unmarshal

func (m *WorkflowTemplate) XXX_Unmarshal(b []byte) error

type WorkflowTemplatePlacement

type WorkflowTemplatePlacement struct {
	// Required. Specifies where workflow executes; either on a managed
	// cluster or an existing cluster chosen by labels.
	//
	// Types that are valid to be assigned to Placement:
	//	*WorkflowTemplatePlacement_ManagedCluster
	//	*WorkflowTemplatePlacement_ClusterSelector
	Placement            isWorkflowTemplatePlacement_Placement `protobuf_oneof:"placement"`
	XXX_NoUnkeyedLiteral struct{}                              `json:"-"`
	XXX_unrecognized     []byte                                `json:"-"`
	XXX_sizecache        int32                                 `json:"-"`
}

Specifies workflow execution target.

Either `managed_cluster` or `cluster_selector` is required.

func (*WorkflowTemplatePlacement) Descriptor

func (*WorkflowTemplatePlacement) Descriptor() ([]byte, []int)

func (*WorkflowTemplatePlacement) GetClusterSelector

func (m *WorkflowTemplatePlacement) GetClusterSelector() *ClusterSelector

func (*WorkflowTemplatePlacement) GetManagedCluster

func (m *WorkflowTemplatePlacement) GetManagedCluster() *ManagedCluster

func (*WorkflowTemplatePlacement) GetPlacement

func (m *WorkflowTemplatePlacement) GetPlacement() isWorkflowTemplatePlacement_Placement

func (*WorkflowTemplatePlacement) ProtoMessage

func (*WorkflowTemplatePlacement) ProtoMessage()

func (*WorkflowTemplatePlacement) Reset

func (m *WorkflowTemplatePlacement) Reset()

func (*WorkflowTemplatePlacement) String

func (m *WorkflowTemplatePlacement) String() string

func (*WorkflowTemplatePlacement) XXX_DiscardUnknown

func (m *WorkflowTemplatePlacement) XXX_DiscardUnknown()

func (*WorkflowTemplatePlacement) XXX_Marshal

func (m *WorkflowTemplatePlacement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowTemplatePlacement) XXX_Merge

func (m *WorkflowTemplatePlacement) XXX_Merge(src proto.Message)

func (*WorkflowTemplatePlacement) XXX_OneofFuncs

func (*WorkflowTemplatePlacement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*WorkflowTemplatePlacement) XXX_Size

func (m *WorkflowTemplatePlacement) XXX_Size() int

func (*WorkflowTemplatePlacement) XXX_Unmarshal

func (m *WorkflowTemplatePlacement) XXX_Unmarshal(b []byte) error

type WorkflowTemplatePlacement_ClusterSelector

type WorkflowTemplatePlacement_ClusterSelector struct {
	ClusterSelector *ClusterSelector `protobuf:"bytes,2,opt,name=cluster_selector,json=clusterSelector,proto3,oneof"`
}

type WorkflowTemplatePlacement_ManagedCluster

type WorkflowTemplatePlacement_ManagedCluster struct {
	ManagedCluster *ManagedCluster `protobuf:"bytes,1,opt,name=managed_cluster,json=managedCluster,proto3,oneof"`
}

type WorkflowTemplateServiceClient

type WorkflowTemplateServiceClient interface {
	// Creates new workflow template.
	CreateWorkflowTemplate(ctx context.Context, in *CreateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Retrieves the latest workflow template.
	//
	// Can retrieve previously instantiated template by specifying optional
	// version parameter.
	GetWorkflowTemplate(ctx context.Context, in *GetWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Instantiates a template and begins execution.
	//
	// The returned Operation can be used to track execution of
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateWorkflowTemplate(ctx context.Context, in *InstantiateWorkflowTemplateRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Instantiates a template and begins execution.
	//
	// This method is equivalent to executing the sequence
	// [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
	// [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
	//
	// The returned Operation can be used to track execution of
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateInlineWorkflowTemplate(ctx context.Context, in *InstantiateInlineWorkflowTemplateRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Updates (replaces) workflow template. The updated template
	// must contain version that matches the current server version.
	UpdateWorkflowTemplate(ctx context.Context, in *UpdateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Lists workflows that match the specified filter in the request.
	ListWorkflowTemplates(ctx context.Context, in *ListWorkflowTemplatesRequest, opts ...grpc.CallOption) (*ListWorkflowTemplatesResponse, error)
	// Deletes a workflow template. It does not cancel in-progress workflows.
	DeleteWorkflowTemplate(ctx context.Context, in *DeleteWorkflowTemplateRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}

WorkflowTemplateServiceClient is the client API for WorkflowTemplateService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewWorkflowTemplateServiceClient

func NewWorkflowTemplateServiceClient(cc *grpc.ClientConn) WorkflowTemplateServiceClient

type WorkflowTemplateServiceServer

type WorkflowTemplateServiceServer interface {
	// Creates new workflow template.
	CreateWorkflowTemplate(context.Context, *CreateWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Retrieves the latest workflow template.
	//
	// Can retrieve previously instantiated template by specifying optional
	// version parameter.
	GetWorkflowTemplate(context.Context, *GetWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Instantiates a template and begins execution.
	//
	// The returned Operation can be used to track execution of
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateWorkflowTemplate(context.Context, *InstantiateWorkflowTemplateRequest) (*longrunning.Operation, error)
	// Instantiates a template and begins execution.
	//
	// This method is equivalent to executing the sequence
	// [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
	// [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
	//
	// The returned Operation can be used to track execution of
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateInlineWorkflowTemplate(context.Context, *InstantiateInlineWorkflowTemplateRequest) (*longrunning.Operation, error)
	// Updates (replaces) workflow template. The updated template
	// must contain version that matches the current server version.
	UpdateWorkflowTemplate(context.Context, *UpdateWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Lists workflows that match the specified filter in the request.
	ListWorkflowTemplates(context.Context, *ListWorkflowTemplatesRequest) (*ListWorkflowTemplatesResponse, error)
	// Deletes a workflow template. It does not cancel in-progress workflows.
	DeleteWorkflowTemplate(context.Context, *DeleteWorkflowTemplateRequest) (*empty.Empty, error)
}

WorkflowTemplateServiceServer is the server API for WorkflowTemplateService service.

type YarnApplication

type YarnApplication struct {
	// Required. The application name.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. The application state.
	State YarnApplication_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.YarnApplication_State" json:"state,omitempty"`
	// Required. The numerical progress of the application, from 1 to 100.
	Progress float32 `protobuf:"fixed32,3,opt,name=progress,proto3" json:"progress,omitempty"`
	// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
	// TimelineServer that provides application-specific information. The URL uses
	// the internal hostname, and requires a proxy server for resolution and,
	// possibly, access.
	TrackingUrl          string   `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl,proto3" json:"tracking_url,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.

**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.

func (*YarnApplication) Descriptor

func (*YarnApplication) Descriptor() ([]byte, []int)

func (*YarnApplication) GetName

func (m *YarnApplication) GetName() string

func (*YarnApplication) GetProgress

func (m *YarnApplication) GetProgress() float32

func (*YarnApplication) GetState

func (m *YarnApplication) GetState() YarnApplication_State

func (*YarnApplication) GetTrackingUrl

func (m *YarnApplication) GetTrackingUrl() string

func (*YarnApplication) ProtoMessage

func (*YarnApplication) ProtoMessage()

func (*YarnApplication) Reset

func (m *YarnApplication) Reset()

func (*YarnApplication) String

func (m *YarnApplication) String() string

func (*YarnApplication) XXX_DiscardUnknown

func (m *YarnApplication) XXX_DiscardUnknown()

func (*YarnApplication) XXX_Marshal

func (m *YarnApplication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*YarnApplication) XXX_Merge

func (m *YarnApplication) XXX_Merge(src proto.Message)

func (*YarnApplication) XXX_Size

func (m *YarnApplication) XXX_Size() int

func (*YarnApplication) XXX_Unmarshal

func (m *YarnApplication) XXX_Unmarshal(b []byte) error

type YarnApplication_State

type YarnApplication_State int32

The application state, corresponding to <code>YarnProtos.YarnApplicationStateProto</code>.

const (
	// Status is unspecified.
	YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0
	// Status is NEW.
	YarnApplication_NEW YarnApplication_State = 1
	// Status is NEW_SAVING.
	YarnApplication_NEW_SAVING YarnApplication_State = 2
	// Status is SUBMITTED.
	YarnApplication_SUBMITTED YarnApplication_State = 3
	// Status is ACCEPTED.
	YarnApplication_ACCEPTED YarnApplication_State = 4
	// Status is RUNNING.
	YarnApplication_RUNNING YarnApplication_State = 5
	// Status is FINISHED.
	YarnApplication_FINISHED YarnApplication_State = 6
	// Status is FAILED.
	YarnApplication_FAILED YarnApplication_State = 7
	// Status is KILLED.
	YarnApplication_KILLED YarnApplication_State = 8
)

func (YarnApplication_State) EnumDescriptor

func (YarnApplication_State) EnumDescriptor() ([]byte, []int)

func (YarnApplication_State) String

func (x YarnApplication_State) String() string

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL