package dataproc

Documentation

Overview

Package dataproc is a generated protocol buffer package.

It is generated from these files:

google/cloud/dataproc/v1/clusters.proto
google/cloud/dataproc/v1/jobs.proto
google/cloud/dataproc/v1/operations.proto

It has these top-level messages:

Cluster
ClusterConfig
GceClusterConfig
InstanceGroupConfig
ManagedGroupConfig
AcceleratorConfig
DiskConfig
NodeInitializationAction
ClusterStatus
SoftwareConfig
ClusterMetrics
CreateClusterRequest
UpdateClusterRequest
DeleteClusterRequest
GetClusterRequest
ListClustersRequest
ListClustersResponse
DiagnoseClusterRequest
DiagnoseClusterResults
LoggingConfig
HadoopJob
SparkJob
PySparkJob
QueryList
HiveJob
SparkSqlJob
PigJob
JobPlacement
JobStatus
JobReference
YarnApplication
Job
JobScheduling
SubmitJobRequest
GetJobRequest
ListJobsRequest
UpdateJobRequest
ListJobsResponse
CancelJobRequest
DeleteJobRequest
ClusterOperationStatus
ClusterOperationMetadata
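
The godoc-style sketch below shows how several of these messages compose into a CreateClusterRequest. It assumes the package is imported under the name `dataproc`; the project ID, region, zone, and machine types are placeholder values, not defaults defined by this package.

func ExampleCreateClusterRequest() {
	// A small cluster: one master and two workers. All identifiers below are
	// placeholders.
	req := &dataproc.CreateClusterRequest{
		ProjectId: "my-project",
		Region:    "us-central1",
		Cluster: &dataproc.Cluster{
			ProjectId:   "my-project",
			ClusterName: "example-cluster",
			Config: &dataproc.ClusterConfig{
				GceClusterConfig: &dataproc.GceClusterConfig{ZoneUri: "us-central1-f"},
				MasterConfig:     &dataproc.InstanceGroupConfig{NumInstances: 1, MachineTypeUri: "n1-standard-2"},
				WorkerConfig:     &dataproc.InstanceGroupConfig{NumInstances: 2, MachineTypeUri: "n1-standard-2"},
			},
		},
	}
	fmt.Println(req.GetCluster().GetClusterName())
	// Output: example-cluster
}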


Constants

This section is empty.

Variables

var ClusterOperationStatus_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "PENDING",
	2: "RUNNING",
	3: "DONE",
}
var ClusterOperationStatus_State_value = map[string]int32{
	"UNKNOWN": 0,
	"PENDING": 1,
	"RUNNING": 2,
	"DONE":    3,
}
var ClusterStatus_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "CREATING",
	2: "RUNNING",
	3: "ERROR",
	4: "DELETING",
	5: "UPDATING",
}
var ClusterStatus_State_value = map[string]int32{
	"UNKNOWN":  0,
	"CREATING": 1,
	"RUNNING":  2,
	"ERROR":    3,
	"DELETING": 4,
	"UPDATING": 5,
}
var ClusterStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "UNHEALTHY",
	2: "STALE_STATUS",
}
var ClusterStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"UNHEALTHY":    1,
	"STALE_STATUS": 2,
}
var JobStatus_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "PENDING",
	8: "SETUP_DONE",
	2: "RUNNING",
	3: "CANCEL_PENDING",
	7: "CANCEL_STARTED",
	4: "CANCELLED",
	5: "DONE",
	6: "ERROR",
	9: "ATTEMPT_FAILURE",
}
var JobStatus_State_value = map[string]int32{
	"STATE_UNSPECIFIED": 0,
	"PENDING":           1,
	"SETUP_DONE":        8,
	"RUNNING":           2,
	"CANCEL_PENDING":    3,
	"CANCEL_STARTED":    7,
	"CANCELLED":         4,
	"DONE":              5,
	"ERROR":             6,
	"ATTEMPT_FAILURE":   9,
}
var JobStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "SUBMITTED",
	2: "QUEUED",
	3: "STALE_STATUS",
}
var JobStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"SUBMITTED":    1,
	"QUEUED":       2,
	"STALE_STATUS": 3,
}
var ListJobsRequest_JobStateMatcher_name = map[int32]string{
	0: "ALL",
	1: "ACTIVE",
	2: "NON_ACTIVE",
}
var ListJobsRequest_JobStateMatcher_value = map[string]int32{
	"ALL":        0,
	"ACTIVE":     1,
	"NON_ACTIVE": 2,
}
var LoggingConfig_Level_name = map[int32]string{
	0: "LEVEL_UNSPECIFIED",
	1: "ALL",
	2: "TRACE",
	3: "DEBUG",
	4: "INFO",
	5: "WARN",
	6: "ERROR",
	7: "FATAL",
	8: "OFF",
}
var LoggingConfig_Level_value = map[string]int32{
	"LEVEL_UNSPECIFIED": 0,
	"ALL":               1,
	"TRACE":             2,
	"DEBUG":             3,
	"INFO":              4,
	"WARN":              5,
	"ERROR":             6,
	"FATAL":             7,
	"OFF":               8,
}
var YarnApplication_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "NEW",
	2: "NEW_SAVING",
	3: "SUBMITTED",
	4: "ACCEPTED",
	5: "RUNNING",
	6: "FINISHED",
	7: "FAILED",
	8: "KILLED",
}
var YarnApplication_State_value = map[string]int32{
	"STATE_UNSPECIFIED": 0,
	"NEW":               1,
	"NEW_SAVING":        2,
	"SUBMITTED":         3,
	"ACCEPTED":          4,
	"RUNNING":           5,
	"FINISHED":          6,
	"FAILED":            7,
	"KILLED":            8,
}

Functions

func RegisterClusterControllerServer

func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)

func RegisterJobControllerServer

func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)
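
Both register functions attach a service implementation to a *grpc.Server. Below is a minimal, assumed sketch of serving ClusterControllerServer: every method simply reports codes.Unimplemented, the listen address is arbitrary, and the import paths for this generated package and for the google.longrunning types are placeholders that depend on where the code was generated.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	longrunning "google.golang.org/genproto/googleapis/longrunning" // assumed source of google_longrunning
	dataproc "path/to/generated/dataproc"                           // placeholder import path for this package
)

// clusterServer is a stub ClusterControllerServer; every RPC reports Unimplemented.
type clusterServer struct{}

func (clusterServer) CreateCluster(context.Context, *dataproc.CreateClusterRequest) (*longrunning.Operation, error) {
	return nil, status.Error(codes.Unimplemented, "CreateCluster not implemented")
}

func (clusterServer) UpdateCluster(context.Context, *dataproc.UpdateClusterRequest) (*longrunning.Operation, error) {
	return nil, status.Error(codes.Unimplemented, "UpdateCluster not implemented")
}

func (clusterServer) DeleteCluster(context.Context, *dataproc.DeleteClusterRequest) (*longrunning.Operation, error) {
	return nil, status.Error(codes.Unimplemented, "DeleteCluster not implemented")
}

func (clusterServer) GetCluster(context.Context, *dataproc.GetClusterRequest) (*dataproc.Cluster, error) {
	return nil, status.Error(codes.Unimplemented, "GetCluster not implemented")
}

func (clusterServer) ListClusters(context.Context, *dataproc.ListClustersRequest) (*dataproc.ListClustersResponse, error) {
	return nil, status.Error(codes.Unimplemented, "ListClusters not implemented")
}

func (clusterServer) DiagnoseCluster(context.Context, *dataproc.DiagnoseClusterRequest) (*longrunning.Operation, error) {
	return nil, status.Error(codes.Unimplemented, "DiagnoseCluster not implemented")
}

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	dataproc.RegisterClusterControllerServer(s, clusterServer{})
	log.Fatal(s.Serve(lis))
}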

Types

type AcceleratorConfig

type AcceleratorConfig struct {
	// Full URL, partial URI, or short name of the accelerator type resource to
	// expose to this instance. See [Google Compute Engine AcceleratorTypes](
	// /compute/docs/reference/beta/acceleratorTypes)
	//
	// Examples
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `nvidia-tesla-k80`
	AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri" json:"accelerator_type_uri,omitempty"`
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount" json:"accelerator_count,omitempty"`
}

Specifies the type and number of accelerator cards attached to the instances of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).
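
A brief, assumed sketch of attaching accelerators to a worker instance group using the short-name form of the accelerator type URI; the machine type and count are placeholders, and the package is assumed to be imported as `dataproc`.

func ExampleAcceleratorConfig() {
	// One NVIDIA Tesla K80 per worker instance.
	workers := &dataproc.InstanceGroupConfig{
		NumInstances:   2,
		MachineTypeUri: "n1-standard-8",
		Accelerators: []*dataproc.AcceleratorConfig{
			{AcceleratorTypeUri: "nvidia-tesla-k80", AcceleratorCount: 1},
		},
	}
	fmt.Println(workers.GetAccelerators()[0].GetAcceleratorCount())
	// Output: 1
}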

func (*AcceleratorConfig) Descriptor

func (*AcceleratorConfig) Descriptor() ([]byte, []int)

func (*AcceleratorConfig) GetAcceleratorCount

func (m *AcceleratorConfig) GetAcceleratorCount() int32

func (*AcceleratorConfig) GetAcceleratorTypeUri

func (m *AcceleratorConfig) GetAcceleratorTypeUri() string

func (*AcceleratorConfig) ProtoMessage

func (*AcceleratorConfig) ProtoMessage()

func (*AcceleratorConfig) Reset

func (m *AcceleratorConfig) Reset()

func (*AcceleratorConfig) String

func (m *AcceleratorConfig) String() string

type CancelJobRequest

type CancelJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}

A request to cancel a job.

func (*CancelJobRequest) Descriptor

func (*CancelJobRequest) Descriptor() ([]byte, []int)

func (*CancelJobRequest) GetJobId

func (m *CancelJobRequest) GetJobId() string

func (*CancelJobRequest) GetProjectId

func (m *CancelJobRequest) GetProjectId() string

func (*CancelJobRequest) GetRegion

func (m *CancelJobRequest) GetRegion() string

func (*CancelJobRequest) ProtoMessage

func (*CancelJobRequest) ProtoMessage()

func (*CancelJobRequest) Reset

func (m *CancelJobRequest) Reset()

func (*CancelJobRequest) String

func (m *CancelJobRequest) String() string

type Cluster

type Cluster struct {
	// Required. The Google Cloud Platform project ID that the cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The cluster name. Cluster names within a project must be
	// unique. Names of deleted clusters can be reused.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Required. The cluster config. Note that Cloud Dataproc may set
	// default values, and values may change when clusters are updated.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// No more than 32 labels can be associated with a cluster.
	Labels map[string]string `` /* 132-byte string literal not displayed */
	// Output-only. Cluster status.
	Status *ClusterStatus `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
	// Output-only. The previous cluster status.
	StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
	// Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
	// generates this value when it creates the cluster.
	ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
	// Contains cluster daemon metrics such as HDFS and YARN stats.
	//
	// **Beta Feature**: This report is available for testing purposes only. It may
	// be changed before final release.
	Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics" json:"metrics,omitempty"`
}

Describes the identifying information, config, and status of a cluster of Google Compute Engine instances.

func (*Cluster) Descriptor

func (*Cluster) Descriptor() ([]byte, []int)

func (*Cluster) GetClusterName

func (m *Cluster) GetClusterName() string

func (*Cluster) GetClusterUuid

func (m *Cluster) GetClusterUuid() string

func (*Cluster) GetConfig

func (m *Cluster) GetConfig() *ClusterConfig

func (*Cluster) GetLabels

func (m *Cluster) GetLabels() map[string]string

func (*Cluster) GetMetrics

func (m *Cluster) GetMetrics() *ClusterMetrics

func (*Cluster) GetProjectId

func (m *Cluster) GetProjectId() string

func (*Cluster) GetStatus

func (m *Cluster) GetStatus() *ClusterStatus

func (*Cluster) GetStatusHistory

func (m *Cluster) GetStatusHistory() []*ClusterStatus

func (*Cluster) ProtoMessage

func (*Cluster) ProtoMessage()

func (*Cluster) Reset

func (m *Cluster) Reset()

func (*Cluster) String

func (m *Cluster) String() string

type ClusterConfig

type ClusterConfig struct {
	// Optional. A Google Cloud Storage staging bucket used for sharing generated
	// SSH keys and config. If you do not specify a staging bucket, Cloud
	// Dataproc will determine an appropriate Cloud Storage location (US,
	// ASIA, or EU) for your cluster's staging bucket according to the Google
	// Compute Engine zone where your cluster is deployed, and then it will create
	// and manage this project-level, per-location bucket for you.
	ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket" json:"config_bucket,omitempty"`
	// Required. The shared Google Compute Engine config settings for
	// all instances in a cluster.
	GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig" json:"gce_cluster_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// the master instance in a cluster.
	MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig" json:"master_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// worker instances in a cluster.
	WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig" json:"worker_config,omitempty"`
	// Optional. The Google Compute Engine config settings for
	// additional worker instances in a cluster.
	SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig" json:"secondary_worker_config,omitempty"`
	// Optional. The config settings for software inside the cluster.
	SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig" json:"software_config,omitempty"`
	// Optional. Commands to execute on each node after config is
	// completed. By default, executables are run on master and all worker nodes.
	// You can test a node's `role` metadata to run an executable on
	// a master or worker node, as shown below using `curl` (you can also use `wget`):
	//
	//     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
	//     if [[ "${ROLE}" == 'Master' ]]; then
	//       ... master specific actions ...
	//     else
	//       ... worker specific actions ...
	//     fi
	InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions" json:"initialization_actions,omitempty"`
}

The cluster config.

func (*ClusterConfig) Descriptor

func (*ClusterConfig) Descriptor() ([]byte, []int)

func (*ClusterConfig) GetConfigBucket

func (m *ClusterConfig) GetConfigBucket() string

func (*ClusterConfig) GetGceClusterConfig

func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig

func (*ClusterConfig) GetInitializationActions

func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction

func (*ClusterConfig) GetMasterConfig

func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig

func (*ClusterConfig) GetSecondaryWorkerConfig

func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) GetSoftwareConfig

func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig

func (*ClusterConfig) GetWorkerConfig

func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) ProtoMessage

func (*ClusterConfig) ProtoMessage()

func (*ClusterConfig) Reset

func (m *ClusterConfig) Reset()

func (*ClusterConfig) String

func (m *ClusterConfig) String() string

type ClusterControllerClient

type ClusterControllerClient interface {
	// Creates a cluster in a project.
	CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
	// Updates a cluster in a project.
	UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
	// Deletes a cluster in a project.
	DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project.
	ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
	// Gets cluster diagnostic information.
	// After the operation completes, the Operation.response field
	// contains `DiagnoseClusterOutputLocation`.
	DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
}

func NewClusterControllerClient

func NewClusterControllerClient(cc *grpc.ClientConn) ClusterControllerClient
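
A hedged sketch of wiring the client to the service over gRPC. The import path of this generated package is a placeholder, and calls against the real dataproc.googleapis.com endpoint additionally require OAuth per-RPC credentials, which are omitted here.

package main

import (
	"context"
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"

	dataproc "path/to/generated/dataproc" // placeholder import path for this package
)

func main() {
	conn, err := grpc.Dial("dataproc.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dataproc.NewClusterControllerClient(conn)
	cluster, err := client.GetCluster(context.Background(), &dataproc.GetClusterRequest{
		ProjectId:   "my-project",      // placeholder
		Region:      "us-central1",     // placeholder
		ClusterName: "example-cluster", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cluster %s is in state %v", cluster.GetClusterName(), cluster.GetStatus().GetState())
}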

type ClusterControllerServer

type ClusterControllerServer interface {
	// Creates a cluster in a project.
	CreateCluster(context.Context, *CreateClusterRequest) (*google_longrunning.Operation, error)
	// Updates a cluster in a project.
	UpdateCluster(context.Context, *UpdateClusterRequest) (*google_longrunning.Operation, error)
	// Deletes a cluster in a project.
	DeleteCluster(context.Context, *DeleteClusterRequest) (*google_longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project.
	ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
	// Gets cluster diagnostic information.
	// After the operation completes, the Operation.response field
	// contains `DiagnoseClusterOutputLocation`.
	DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*google_longrunning.Operation, error)
}

type ClusterMetrics

type ClusterMetrics struct {
	// The HDFS metrics.
	HdfsMetrics map[string]int64 `` /* 162-byte string literal not displayed */
	// The YARN metrics.
	YarnMetrics map[string]int64 `` /* 162-byte string literal not displayed */
}

Contains cluster daemon metrics, such as HDFS and YARN stats.

**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
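
The metrics are plain name-to-value maps. A small sketch of reading them through the generated getters; the values below are invented placeholders, since on a live cluster the service populates these fields (the package is assumed to be imported as `dataproc`).

func ExampleClusterMetrics() {
	m := &dataproc.ClusterMetrics{
		HdfsMetrics: map[string]int64{"dfs-capacity-used": 1024}, // placeholder metric
		YarnMetrics: map[string]int64{"yarn-apps-running": 2},    // placeholder metric
	}
	for name, value := range m.GetYarnMetrics() {
		fmt.Printf("%s = %d\n", name, value)
	}
	// Output: yarn-apps-running = 2
}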

func (*ClusterMetrics) Descriptor

func (*ClusterMetrics) Descriptor() ([]byte, []int)

func (*ClusterMetrics) GetHdfsMetrics

func (m *ClusterMetrics) GetHdfsMetrics() map[string]int64

func (*ClusterMetrics) GetYarnMetrics

func (m *ClusterMetrics) GetYarnMetrics() map[string]int64

func (*ClusterMetrics) ProtoMessage

func (*ClusterMetrics) ProtoMessage()

func (*ClusterMetrics) Reset

func (m *ClusterMetrics) Reset()

func (*ClusterMetrics) String

func (m *ClusterMetrics) String() string

type ClusterOperationMetadata

type ClusterOperationMetadata struct {
	// Output-only. Name of the cluster for the operation.
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Output-only. Cluster UUID for the operation.
	ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
	// Output-only. Current operation status.
	Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status" json:"status,omitempty"`
	// Output-only. The previous operation status.
	StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
	// Output-only. The operation type.
	OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType" json:"operation_type,omitempty"`
	// Output-only. Short description of operation.
	Description string `protobuf:"bytes,12,opt,name=description" json:"description,omitempty"`
	// Output-only. Labels associated with the operation
	Labels map[string]string `` /* 133-byte string literal not displayed */
	// Output-only. Errors encountered during operation execution.
	Warnings []string `protobuf:"bytes,14,rep,name=warnings" json:"warnings,omitempty"`
}

Metadata describing the operation.

func (*ClusterOperationMetadata) Descriptor

func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)

func (*ClusterOperationMetadata) GetClusterName

func (m *ClusterOperationMetadata) GetClusterName() string

func (*ClusterOperationMetadata) GetClusterUuid

func (m *ClusterOperationMetadata) GetClusterUuid() string

func (*ClusterOperationMetadata) GetDescription

func (m *ClusterOperationMetadata) GetDescription() string

func (*ClusterOperationMetadata) GetLabels

func (m *ClusterOperationMetadata) GetLabels() map[string]string

func (*ClusterOperationMetadata) GetOperationType

func (m *ClusterOperationMetadata) GetOperationType() string

func (*ClusterOperationMetadata) GetStatus

func (m *ClusterOperationMetadata) GetStatus() *ClusterOperationStatus

func (*ClusterOperationMetadata) GetStatusHistory

func (m *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus

func (*ClusterOperationMetadata) GetWarnings

func (m *ClusterOperationMetadata) GetWarnings() []string

func (*ClusterOperationMetadata) ProtoMessage

func (*ClusterOperationMetadata) ProtoMessage()

func (*ClusterOperationMetadata) Reset

func (m *ClusterOperationMetadata) Reset()

func (*ClusterOperationMetadata) String

func (m *ClusterOperationMetadata) String() string

type ClusterOperationStatus

type ClusterOperationStatus struct {
	// Output-only. A message containing the operation state.
	State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.ClusterOperationStatus_State" json:"state,omitempty"`
	// Output-only. A message containing the detailed operation state.
	InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState" json:"inner_state,omitempty"`
	// Output-only. A message containing any operation metadata details.
	Details string `protobuf:"bytes,3,opt,name=details" json:"details,omitempty"`
	// Output-only. The time this state was entered.
	StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
}

The status of the operation.

func (*ClusterOperationStatus) Descriptor

func (*ClusterOperationStatus) Descriptor() ([]byte, []int)

func (*ClusterOperationStatus) GetDetails

func (m *ClusterOperationStatus) GetDetails() string

func (*ClusterOperationStatus) GetInnerState

func (m *ClusterOperationStatus) GetInnerState() string

func (*ClusterOperationStatus) GetState

func (m *ClusterOperationStatus) GetState() ClusterOperationStatus_State

func (*ClusterOperationStatus) GetStateStartTime

func (m *ClusterOperationStatus) GetStateStartTime() *google_protobuf3.Timestamp

func (*ClusterOperationStatus) ProtoMessage

func (*ClusterOperationStatus) ProtoMessage()

func (*ClusterOperationStatus) Reset

func (m *ClusterOperationStatus) Reset()

func (*ClusterOperationStatus) String

func (m *ClusterOperationStatus) String() string

type ClusterOperationStatus_State

type ClusterOperationStatus_State int32

The operation state.

const (
	// Unused.
	ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0
	// The operation has been created.
	ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1
	// The operation is running.
	ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2
	// The operation is done; either cancelled or completed.
	ClusterOperationStatus_DONE ClusterOperationStatus_State = 3
)

func (ClusterOperationStatus_State) EnumDescriptor

func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int)

func (ClusterOperationStatus_State) String

func (x ClusterOperationStatus_State) String() string

type ClusterStatus

type ClusterStatus struct {
	// Output-only. The cluster's state.
	State ClusterStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.ClusterStatus_State" json:"state,omitempty"`
	// Output-only. Optional details of cluster's state.
	Detail string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
	// Output-only. Time when this state was entered.
	StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
	// Output-only. Additional state information that includes
	// status reported by the agent.
	Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,enum=google.cloud.dataproc.v1.ClusterStatus_Substate" json:"substate,omitempty"`
}

The status of a cluster and its instances.

func (*ClusterStatus) Descriptor

func (*ClusterStatus) Descriptor() ([]byte, []int)

func (*ClusterStatus) GetDetail

func (m *ClusterStatus) GetDetail() string

func (*ClusterStatus) GetState

func (m *ClusterStatus) GetState() ClusterStatus_State

func (*ClusterStatus) GetStateStartTime

func (m *ClusterStatus) GetStateStartTime() *google_protobuf3.Timestamp

func (*ClusterStatus) GetSubstate

func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate

func (*ClusterStatus) ProtoMessage

func (*ClusterStatus) ProtoMessage()

func (*ClusterStatus) Reset

func (m *ClusterStatus) Reset()

func (*ClusterStatus) String

func (m *ClusterStatus) String() string

type ClusterStatus_State

type ClusterStatus_State int32

The cluster state.

const (
	// The cluster state is unknown.
	ClusterStatus_UNKNOWN ClusterStatus_State = 0
	// The cluster is being created and set up. It is not ready for use.
	ClusterStatus_CREATING ClusterStatus_State = 1
	// The cluster is currently running and healthy. It is ready for use.
	ClusterStatus_RUNNING ClusterStatus_State = 2
	// The cluster encountered an error. It is not ready for use.
	ClusterStatus_ERROR ClusterStatus_State = 3
	// The cluster is being deleted. It cannot be used.
	ClusterStatus_DELETING ClusterStatus_State = 4
	// The cluster is being updated. It continues to accept and process jobs.
	ClusterStatus_UPDATING ClusterStatus_State = 5
)

func (ClusterStatus_State) EnumDescriptor

func (ClusterStatus_State) EnumDescriptor() ([]byte, []int)

func (ClusterStatus_State) String

func (x ClusterStatus_State) String() string

type ClusterStatus_Substate

type ClusterStatus_Substate int32

const (
	ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
	// The cluster is known to be in an unhealthy state
	// (for example, critical daemons are not running or HDFS capacity is
	// exhausted).
	//
	// Applies to RUNNING state.
	ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
	// The agent-reported status is out of date (may occur if
	// Cloud Dataproc loses communication with Agent).
	//
	// Applies to RUNNING state.
	ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)

func (ClusterStatus_Substate) EnumDescriptor

func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int)

func (ClusterStatus_Substate) String

func (x ClusterStatus_Substate) String() string

type CreateClusterRequest

type CreateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The cluster to create.
	Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"`
}

A request to create a cluster.

func (*CreateClusterRequest) Descriptor

func (*CreateClusterRequest) Descriptor() ([]byte, []int)

func (*CreateClusterRequest) GetCluster

func (m *CreateClusterRequest) GetCluster() *Cluster

func (*CreateClusterRequest) GetProjectId

func (m *CreateClusterRequest) GetProjectId() string

func (*CreateClusterRequest) GetRegion

func (m *CreateClusterRequest) GetRegion() string

func (*CreateClusterRequest) ProtoMessage

func (*CreateClusterRequest) ProtoMessage()

func (*CreateClusterRequest) Reset

func (m *CreateClusterRequest) Reset()

func (*CreateClusterRequest) String

func (m *CreateClusterRequest) String() string

type DeleteClusterRequest

type DeleteClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}

A request to delete a cluster.

func (*DeleteClusterRequest) Descriptor

func (*DeleteClusterRequest) Descriptor() ([]byte, []int)

func (*DeleteClusterRequest) GetClusterName

func (m *DeleteClusterRequest) GetClusterName() string

func (*DeleteClusterRequest) GetProjectId

func (m *DeleteClusterRequest) GetProjectId() string

func (*DeleteClusterRequest) GetRegion

func (m *DeleteClusterRequest) GetRegion() string

func (*DeleteClusterRequest) ProtoMessage

func (*DeleteClusterRequest) ProtoMessage()

func (*DeleteClusterRequest) Reset

func (m *DeleteClusterRequest) Reset()

func (*DeleteClusterRequest) String

func (m *DeleteClusterRequest) String() string

type DeleteJobRequest

type DeleteJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}

A request to delete a job.

func (*DeleteJobRequest) Descriptor

func (*DeleteJobRequest) Descriptor() ([]byte, []int)

func (*DeleteJobRequest) GetJobId

func (m *DeleteJobRequest) GetJobId() string

func (*DeleteJobRequest) GetProjectId

func (m *DeleteJobRequest) GetProjectId() string

func (*DeleteJobRequest) GetRegion

func (m *DeleteJobRequest) GetRegion() string

func (*DeleteJobRequest) ProtoMessage

func (*DeleteJobRequest) ProtoMessage()

func (*DeleteJobRequest) Reset

func (m *DeleteJobRequest) Reset()

func (*DeleteJobRequest) String

func (m *DeleteJobRequest) String() string

type DiagnoseClusterRequest

type DiagnoseClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}

A request to collect cluster diagnostic information.

func (*DiagnoseClusterRequest) Descriptor

func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int)

func (*DiagnoseClusterRequest) GetClusterName

func (m *DiagnoseClusterRequest) GetClusterName() string

func (*DiagnoseClusterRequest) GetProjectId

func (m *DiagnoseClusterRequest) GetProjectId() string

func (*DiagnoseClusterRequest) GetRegion

func (m *DiagnoseClusterRequest) GetRegion() string

func (*DiagnoseClusterRequest) ProtoMessage

func (*DiagnoseClusterRequest) ProtoMessage()

func (*DiagnoseClusterRequest) Reset

func (m *DiagnoseClusterRequest) Reset()

func (*DiagnoseClusterRequest) String

func (m *DiagnoseClusterRequest) String() string

type DiagnoseClusterResults

type DiagnoseClusterResults struct {
	// Output-only. The Google Cloud Storage URI of the diagnostic output.
	// The output report is a plain text file with a summary of collected
	// diagnostics.
	OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"`
}

The location of diagnostic output.

func (*DiagnoseClusterResults) Descriptor

func (*DiagnoseClusterResults) Descriptor() ([]byte, []int)

func (*DiagnoseClusterResults) GetOutputUri

func (m *DiagnoseClusterResults) GetOutputUri() string

func (*DiagnoseClusterResults) ProtoMessage

func (*DiagnoseClusterResults) ProtoMessage()

func (*DiagnoseClusterResults) Reset

func (m *DiagnoseClusterResults) Reset()

func (*DiagnoseClusterResults) String

func (m *DiagnoseClusterResults) String() string

type DiskConfig

type DiskConfig struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb" json:"boot_disk_size_gb,omitempty"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0).
	// If SSDs are not attached, the boot disk is used to store runtime logs and
	// [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
	// If one or more SSDs are attached, this runtime bulk
	// data is spread across them, and the boot disk contains only basic
	// config and installed binaries.
	NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds" json:"num_local_ssds,omitempty"`
}

Specifies the config of disk options for a group of VM instances.

func (*DiskConfig) Descriptor

func (*DiskConfig) Descriptor() ([]byte, []int)

func (*DiskConfig) GetBootDiskSizeGb

func (m *DiskConfig) GetBootDiskSizeGb() int32

func (*DiskConfig) GetNumLocalSsds

func (m *DiskConfig) GetNumLocalSsds() int32

func (*DiskConfig) ProtoMessage

func (*DiskConfig) ProtoMessage()

func (*DiskConfig) Reset

func (m *DiskConfig) Reset()

func (*DiskConfig) String

func (m *DiskConfig) String() string

type GceClusterConfig

type GceClusterConfig struct {
	// Optional. The zone where the Google Compute Engine cluster will be located.
	// On a create request, it is required in the "global" region. If omitted
	// in a non-global Cloud Dataproc region, the service will pick a zone in the
	// corresponding Compute Engine region. On a get request, zone will
	// always be present.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
	// * `projects/[project_id]/zones/[zone]`
	// * `us-central1-f`
	ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri" json:"zone_uri,omitempty"`
	// Optional. The Google Compute Engine network to be used for machine
	// communications. Cannot be specified with subnetwork_uri. If neither
	// `network_uri` nor `subnetwork_uri` is specified, the "default" network of
	// the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
	// [Using Subnetworks](/compute/docs/subnetworks) for more information).
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
	// * `projects/[project_id]/regions/global/default`
	// * `default`
	NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri" json:"network_uri,omitempty"`
	// Optional. The Google Compute Engine subnetwork to be used for machine
	// communications. Cannot be specified with network_uri.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
	// * `projects/[project_id]/regions/us-east1/sub0`
	// * `sub0`
	SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri" json:"subnetwork_uri,omitempty"`
	// Optional. If true, all instances in the cluster will only have internal IP
	// addresses. By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance.
	// This `internal_ip_only` restriction can only be enabled for subnetwork
	// enabled networks, and all off-cluster dependencies must be configured to be
	// accessible without external IP addresses.
	InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly" json:"internal_ip_only,omitempty"`
	// Optional. The service account of the instances. Defaults to the default
	// Google Compute Engine service account. Custom service accounts need
	// permissions equivalent to the following IAM roles:
	//
	// * roles/logging.logWriter
	// * roles/storage.objectAdmin
	//
	// (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
	// for more information).
	// Example: `[account_id]@[project_id].iam.gserviceaccount.com`
	ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"`
	// Optional. The URIs of service account scopes to be included in Google
	// Compute Engine instances. The following base set of scopes is always
	// included:
	//
	// * https://www.googleapis.com/auth/cloud.useraccounts.readonly
	// * https://www.googleapis.com/auth/devstorage.read_write
	// * https://www.googleapis.com/auth/logging.write
	//
	// If no scopes are specified, the following defaults are also provided:
	//
	// * https://www.googleapis.com/auth/bigquery
	// * https://www.googleapis.com/auth/bigtable.admin.table
	// * https://www.googleapis.com/auth/bigtable.data
	// * https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes" json:"service_account_scopes,omitempty"`
	// The Google Compute Engine tags to add to all instances (see
	// [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
	Tags []string `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"`
	// The Google Compute Engine metadata entries to add to all instances (see
	// [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `` /* 136-byte string literal not displayed */
}

Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.
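
A sketch of a subnetwork-scoped, internal-IP-only config; the URIs follow the short or partial forms shown in the field comments above and are placeholders, and the package is assumed to be imported as `dataproc`.

func ExampleGceClusterConfig() {
	gce := &dataproc.GceClusterConfig{
		ZoneUri:              "us-central1-f",
		SubnetworkUri:        "projects/my-project/regions/us-central1/sub0",
		InternalIpOnly:       true,
		ServiceAccountScopes: []string{"https://www.googleapis.com/auth/bigquery"},
		Tags:                 []string{"dataproc"},
		Metadata:             map[string]string{"team": "data-eng"}, // placeholder metadata entry
	}
	fmt.Println(gce.GetSubnetworkUri())
	// Output: projects/my-project/regions/us-central1/sub0
}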

func (*GceClusterConfig) Descriptor

func (*GceClusterConfig) Descriptor() ([]byte, []int)

func (*GceClusterConfig) GetInternalIpOnly

func (m *GceClusterConfig) GetInternalIpOnly() bool

func (*GceClusterConfig) GetMetadata

func (m *GceClusterConfig) GetMetadata() map[string]string

func (*GceClusterConfig) GetNetworkUri

func (m *GceClusterConfig) GetNetworkUri() string

func (*GceClusterConfig) GetServiceAccount

func (m *GceClusterConfig) GetServiceAccount() string

func (*GceClusterConfig) GetServiceAccountScopes

func (m *GceClusterConfig) GetServiceAccountScopes() []string

func (*GceClusterConfig) GetSubnetworkUri

func (m *GceClusterConfig) GetSubnetworkUri() string

func (*GceClusterConfig) GetTags

func (m *GceClusterConfig) GetTags() []string

func (*GceClusterConfig) GetZoneUri

func (m *GceClusterConfig) GetZoneUri() string

func (*GceClusterConfig) ProtoMessage

func (*GceClusterConfig) ProtoMessage()

func (*GceClusterConfig) Reset

func (m *GceClusterConfig) Reset()

func (*GceClusterConfig) String

func (m *GceClusterConfig) String() string

type GetClusterRequest

type GetClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
}

Request to get the resource representation for a cluster in a project.

func (*GetClusterRequest) Descriptor

func (*GetClusterRequest) Descriptor() ([]byte, []int)

func (*GetClusterRequest) GetClusterName

func (m *GetClusterRequest) GetClusterName() string

func (*GetClusterRequest) GetProjectId

func (m *GetClusterRequest) GetProjectId() string

func (*GetClusterRequest) GetRegion

func (m *GetClusterRequest) GetRegion() string

func (*GetClusterRequest) ProtoMessage

func (*GetClusterRequest) ProtoMessage()

func (*GetClusterRequest) Reset

func (m *GetClusterRequest) Reset()

func (*GetClusterRequest) String

func (m *GetClusterRequest) String() string

type GetJobRequest

type GetJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}

A request to get the resource representation for a job in a project.

func (*GetJobRequest) Descriptor

func (*GetJobRequest) Descriptor() ([]byte, []int)

func (*GetJobRequest) GetJobId

func (m *GetJobRequest) GetJobId() string

func (*GetJobRequest) GetProjectId

func (m *GetJobRequest) GetProjectId() string

func (*GetJobRequest) GetRegion

func (m *GetJobRequest) GetRegion() string

func (*GetJobRequest) ProtoMessage

func (*GetJobRequest) ProtoMessage()

func (*GetJobRequest) Reset

func (m *GetJobRequest) Reset()

func (*GetJobRequest) String

func (m *GetJobRequest) String() string

type HadoopJob

type HadoopJob struct {
	// Required. Indicates the location of the driver's main class. Specify
	// either the jar file that contains the main class or the main class name.
	// To specify both, add the jar file to `jar_file_uris`, and then specify
	// the main class name in this property.
	//
	// Types that are valid to be assigned to Driver:
	//	*HadoopJob_MainJarFileUri
	//	*HadoopJob_MainClass
	Driver isHadoopJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not
	// include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
	// properties, since a collision may occur that causes an incorrect job
	// submission.
	Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"`
	// Optional. Jar file URIs to add to the CLASSPATHs of the
	// Hadoop driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
	// to the working directory of Hadoop drivers and distributed tasks. Useful
	// for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory of
	// Hadoop drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, or .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Hadoop.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site and
	// classes in user code.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}

A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
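
The driver oneof is set through one of its wrapper types. A brief sketch (URIs and the class name are placeholders; the package is assumed to be imported as `dataproc`):

func ExampleHadoopJob() {
	// The main class lives in a jar added to jar_file_uris, so the oneof is
	// set via HadoopJob_MainClass rather than HadoopJob_MainJarFileUri.
	hadoop := &dataproc.HadoopJob{
		Driver:      &dataproc.HadoopJob_MainClass{MainClass: "org.example.WordCount"},
		JarFileUris: []string{"gs://my-bucket/wordcount.jar"},
		Args:        []string{"gs://my-bucket/input/", "gs://my-bucket/output/"},
	}
	fmt.Println(hadoop.GetMainClass())
	// Output: org.example.WordCount
}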

func (*HadoopJob) Descriptor

func (*HadoopJob) Descriptor() ([]byte, []int)

func (*HadoopJob) GetArchiveUris

func (m *HadoopJob) GetArchiveUris() []string

func (*HadoopJob) GetArgs

func (m *HadoopJob) GetArgs() []string

func (*HadoopJob) GetDriver

func (m *HadoopJob) GetDriver() isHadoopJob_Driver

func (*HadoopJob) GetFileUris

func (m *HadoopJob) GetFileUris() []string

func (*HadoopJob) GetJarFileUris

func (m *HadoopJob) GetJarFileUris() []string

func (*HadoopJob) GetLoggingConfig

func (m *HadoopJob) GetLoggingConfig() *LoggingConfig

func (*HadoopJob) GetMainClass

func (m *HadoopJob) GetMainClass() string

func (*HadoopJob) GetMainJarFileUri

func (m *HadoopJob) GetMainJarFileUri() string

func (*HadoopJob) GetProperties

func (m *HadoopJob) GetProperties() map[string]string

func (*HadoopJob) ProtoMessage

func (*HadoopJob) ProtoMessage()

func (*HadoopJob) Reset

func (m *HadoopJob) Reset()

func (*HadoopJob) String

func (m *HadoopJob) String() string

func (*HadoopJob) XXX_OneofFuncs

func (*HadoopJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type HadoopJob_MainClass

type HadoopJob_MainClass struct {
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"`
}

type HadoopJob_MainJarFileUri

type HadoopJob_MainJarFileUri struct {
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"`
}

type HiveJob

type HiveJob struct {
	// Required. The sequence of Hive queries to execute, specified as either
	// an HCFS file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*HiveJob_QueryFileUri
	//	*HiveJob_QueryList
	Queries isHiveJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when executing
	// independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Hive command: `SET name="value";`).
	ScriptVariables map[string]string `` /* 173-byte string literal not displayed */
	// Optional. A mapping of property names and values, used to configure Hive.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/hive/conf/hive-site.xml, and classes in user code.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the
	// Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
	// and UDFs.
	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
}

A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN.
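
The queries oneof takes either a query file URI or an inline QueryList; the sketch below uses the file form. The URI and variables are placeholders, and the package is assumed to be imported as `dataproc`.

func ExampleHiveJob() {
	hive := &dataproc.HiveJob{
		Queries:           &dataproc.HiveJob_QueryFileUri{QueryFileUri: "gs://my-bucket/queries.hql"},
		ContinueOnFailure: true,
		ScriptVariables:   map[string]string{"env": "dev"}, // equivalent to: SET env="dev";
	}
	fmt.Println(hive.GetQueryFileUri())
	// Output: gs://my-bucket/queries.hql
}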

func (*HiveJob) Descriptor

func (*HiveJob) Descriptor() ([]byte, []int)

func (*HiveJob) GetContinueOnFailure

func (m *HiveJob) GetContinueOnFailure() bool

func (*HiveJob) GetJarFileUris

func (m *HiveJob) GetJarFileUris() []string

func (*HiveJob) GetProperties

func (m *HiveJob) GetProperties() map[string]string

func (*HiveJob) GetQueries

func (m *HiveJob) GetQueries() isHiveJob_Queries

func (*HiveJob) GetQueryFileUri

func (m *HiveJob) GetQueryFileUri() string

func (*HiveJob) GetQueryList

func (m *HiveJob) GetQueryList() *QueryList

func (*HiveJob) GetScriptVariables

func (m *HiveJob) GetScriptVariables() map[string]string

func (*HiveJob) ProtoMessage

func (*HiveJob) ProtoMessage()

func (*HiveJob) Reset

func (m *HiveJob) Reset()

func (*HiveJob) String

func (m *HiveJob) String() string

func (*HiveJob) XXX_OneofFuncs

func (*HiveJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type HiveJob_QueryFileUri

type HiveJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}

type HiveJob_QueryList

type HiveJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}

type InstanceGroupConfig

type InstanceGroupConfig struct {
	// Optional. The number of VM instances in the instance group.
	// For master instance groups, must be set to 1.
	NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances" json:"num_instances,omitempty"`
	// Optional. The list of instance names. Cloud Dataproc derives the names from
	// `cluster_name`, `num_instances`, and the instance group if not set by user
	// (recommended practice is to let Cloud Dataproc derive the name).
	InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames" json:"instance_names,omitempty"`
	// Output-only. The Google Compute Engine image resource used for cluster
	// instances. Inferred from `SoftwareConfig.image_version`.
	ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"`
	// Optional. The Google Compute Engine machine type used for cluster instances.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `n1-standard-2`
	MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri" json:"machine_type_uri,omitempty"`
	// Optional. Disk option config settings.
	DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig" json:"disk_config,omitempty"`
	// Optional. Specifies that this instance group contains preemptible instances.
	IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible" json:"is_preemptible,omitempty"`
	// Output-only. The config for Google Compute Engine Instance Group
	// Manager that manages this group.
	// This is only used for preemptible instance groups.
	ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig" json:"managed_group_config,omitempty"`
	// Optional. The Google Compute Engine accelerator configuration for these
	// instances.
	//
	// **Beta Feature**: This feature is still under development. It may be
	// changed before final release.
	Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators" json:"accelerators,omitempty"`
}

Optional. The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.

func (*InstanceGroupConfig) Descriptor

func (*InstanceGroupConfig) Descriptor() ([]byte, []int)

func (*InstanceGroupConfig) GetAccelerators

func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig

func (*InstanceGroupConfig) GetDiskConfig

func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig

func (*InstanceGroupConfig) GetImageUri

func (m *InstanceGroupConfig) GetImageUri() string

func (*InstanceGroupConfig) GetInstanceNames

func (m *InstanceGroupConfig) GetInstanceNames() []string

func (*InstanceGroupConfig) GetIsPreemptible

func (m *InstanceGroupConfig) GetIsPreemptible() bool

func (*InstanceGroupConfig) GetMachineTypeUri

func (m *InstanceGroupConfig) GetMachineTypeUri() string

func (*InstanceGroupConfig) GetManagedGroupConfig

func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig

func (*InstanceGroupConfig) GetNumInstances

func (m *InstanceGroupConfig) GetNumInstances() int32

func (*InstanceGroupConfig) ProtoMessage

func (*InstanceGroupConfig) ProtoMessage()

func (*InstanceGroupConfig) Reset

func (m *InstanceGroupConfig) Reset()

func (*InstanceGroupConfig) String

func (m *InstanceGroupConfig) String() string

type Job

type Job struct {
	// Optional. The fully qualified reference to the job, which can be used to
	// obtain the equivalent REST path of the job resource. If this property
	// is not specified when a job is created, the server generates a
	// <code>job_id</code>.
	Reference *JobReference `protobuf:"bytes,1,opt,name=reference" json:"reference,omitempty"`
	// Required. Job information, including how, when, and where to
	// run the job.
	Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement" json:"placement,omitempty"`
	// Required. The application/framework-specific portion of the job.
	//
	// Types that are valid to be assigned to TypeJob:
	//	*Job_HadoopJob
	//	*Job_SparkJob
	//	*Job_PysparkJob
	//	*Job_HiveJob
	//	*Job_PigJob
	//	*Job_SparkSqlJob
	TypeJob isJob_TypeJob `protobuf_oneof:"type_job"`
	// Output-only. The job status. Additional application-specific
	// status information may be contained in the <code>type_job</code>
	// and <code>yarn_applications</code> fields.
	Status *JobStatus `protobuf:"bytes,8,opt,name=status" json:"status,omitempty"`
	// Output-only. The previous job status.
	StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"`
	// Output-only. The collection of YARN applications spun up by this job.
	//
	// **Beta** Feature: This report is available for testing purposes only. It may
	// be changed before final release.
	YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications" json:"yarn_applications,omitempty"`
	// Output-only. A URI pointing to the location of the stdout of the job's
	// driver program.
	DriverOutputResourceUri string `` /* 128-byte string literal not displayed */
	// Output-only. If present, the location of miscellaneous control files
	// which may be used as part of job setup and handling. If not present,
	// control files may be placed in the same location as `driver_output_uri`.
	DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri" json:"driver_control_files_uri,omitempty"`
	// Optional. The labels to associate with this job.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// No more than 32 labels can be associated with a job.
	Labels map[string]string `` /* 133-byte string literal not displayed */
	// Optional. Job scheduling configuration.
	Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling" json:"scheduling,omitempty"`
}

A Cloud Dataproc job resource.
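
A sketch of composing a Job: the placement routes it to a cluster, and the type_job oneof carries the framework-specific payload. The Job_HadoopJob wrapper and its HadoopJob field are assumed here from the generator's usual oneof naming pattern, and all names and URIs are placeholders.

func ExampleJob() {
	job := &dataproc.Job{
		Placement: &dataproc.JobPlacement{ClusterName: "example-cluster"},
		TypeJob: &dataproc.Job_HadoopJob{ // assumed oneof wrapper name
			HadoopJob: &dataproc.HadoopJob{
				Driver: &dataproc.HadoopJob_MainJarFileUri{MainJarFileUri: "gs://my-bucket/wordcount.jar"},
			},
		},
		Labels: map[string]string{"env": "dev"},
	}
	fmt.Println(job.GetHadoopJob().GetMainJarFileUri())
	// Output: gs://my-bucket/wordcount.jar
}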

func (*Job) Descriptor

func (*Job) Descriptor() ([]byte, []int)

func (*Job) GetDriverControlFilesUri

func (m *Job) GetDriverControlFilesUri() string

func (*Job) GetDriverOutputResourceUri

func (m *Job) GetDriverOutputResourceUri() string

func (*Job) GetHadoopJob

func (m *Job) GetHadoopJob() *HadoopJob

func (*Job) GetHiveJob

func (m *Job) GetHiveJob() *HiveJob

func (*Job) GetLabels

func (m *Job) GetLabels() map[string]string

func (*Job) GetPigJob

func (m *Job) GetPigJob() *PigJob

func (*Job) GetPlacement

func (m *Job) GetPlacement() *JobPlacement

func (*Job) GetPysparkJob

func (m *Job) GetPysparkJob() *PySparkJob

func (*Job) GetReference

func (m *Job) GetReference() *JobReference

func (*Job) GetScheduling

func (m *Job) GetScheduling() *JobScheduling

func (*Job) GetSparkJob

func (m *Job) GetSparkJob() *SparkJob

func (*Job) GetSparkSqlJob

func (m *Job) GetSparkSqlJob() *SparkSqlJob

func (*Job) GetStatus

func (m *Job) GetStatus() *JobStatus

func (*Job) GetStatusHistory

func (m *Job) GetStatusHistory() []*JobStatus

func (*Job) GetTypeJob

func (m *Job) GetTypeJob() isJob_TypeJob

func (*Job) GetYarnApplications

func (m *Job) GetYarnApplications() []*YarnApplication

func (*Job) ProtoMessage

func (*Job) ProtoMessage()

func (*Job) Reset

func (m *Job) Reset()

func (*Job) String

func (m *Job) String() string

func (*Job) XXX_OneofFuncs

func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type JobControllerClient

type JobControllerClient interface {
	// Submits a job to a cluster.
	SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Gets the resource representation for a job in a project.
	GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
	// [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
	CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
}

func NewJobControllerClient

func NewJobControllerClient(cc *grpc.ClientConn) JobControllerClient
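
A hedged sketch of cancelling a job through the client; the import path is a placeholder and, as with the cluster client above, real calls also need OAuth per-RPC credentials.

package main

import (
	"context"
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"

	dataproc "path/to/generated/dataproc" // placeholder import path for this package
)

func main() {
	conn, err := grpc.Dial("dataproc.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	jobs := dataproc.NewJobControllerClient(conn)
	job, err := jobs.CancelJob(context.Background(), &dataproc.CancelJobRequest{
		ProjectId: "my-project",  // placeholder
		Region:    "us-central1", // placeholder
		JobId:     "job-1234",    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("job %s is now %v", job.GetReference().GetJobId(), job.GetStatus().GetState())
}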

type JobControllerServer

type JobControllerServer interface {
	// Submits a job to a cluster.
	SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
	// Gets the resource representation for a job in a project.
	GetJob(context.Context, *GetJobRequest) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(context.Context, *UpdateJobRequest) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
	// [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
	CancelJob(context.Context, *CancelJobRequest) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(context.Context, *DeleteJobRequest) (*google_protobuf2.Empty, error)
}

type JobPlacement

type JobPlacement struct {
	// Required. The name of the cluster where the job will be submitted.
	ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Output-only. A cluster UUID generated by the Cloud Dataproc service when
	// the job is submitted.
	ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"`
}

Cloud Dataproc job placement config, specifying the cluster on which a job runs.

func (*JobPlacement) Descriptor

func (*JobPlacement) Descriptor() ([]byte, []int)

func (*JobPlacement) GetClusterName

func (m *JobPlacement) GetClusterName() string

func (*JobPlacement) GetClusterUuid

func (m *JobPlacement) GetClusterUuid() string

func (*JobPlacement) ProtoMessage

func (*JobPlacement) ProtoMessage()

func (*JobPlacement) Reset

func (m *JobPlacement) Reset()

func (*JobPlacement) String

func (m *JobPlacement) String() string

type JobReference

type JobReference struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Optional. The job ID, which must be unique within the project. The job ID
	// is generated by the server upon job submission or provided by the user as a
	// means to perform retries without creating duplicate jobs. The ID must
	// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
	// hyphens (-). The maximum length is 100 characters.
	JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
}

Encapsulates the full scoping used to reference a job.
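
As a small illustration, a caller can supply its own JobId so that a retried submission does not create a duplicate job. A minimal sketch, assuming the package is imported as dataproc and that the Job message's Reference field is as generated earlier in this package:

	job := &dataproc.Job{
		Reference: &dataproc.JobReference{
			ProjectId: "my-project",             // placeholder
			JobId:     "nightly-etl-2020-11-02", // letters, digits, '-' and '_' only; at most 100 chars
		},
	}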

func (*JobReference) Descriptor

func (*JobReference) Descriptor() ([]byte, []int)

func (*JobReference) GetJobId

func (m *JobReference) GetJobId() string

func (*JobReference) GetProjectId

func (m *JobReference) GetProjectId() string

func (*JobReference) ProtoMessage

func (*JobReference) ProtoMessage()

func (*JobReference) Reset

func (m *JobReference) Reset()

func (*JobReference) String

func (m *JobReference) String() string

type JobScheduling

type JobScheduling struct {
	// Optional. Maximum number of times per hour a driver may be restarted as
	// a result of the driver terminating with a non-zero code before the job is
	// reported failed.
	//
	// A job may be reported as thrashing if the driver exits with a non-zero
	// code 4 times within a 10-minute window.
	//
	// Maximum value is 10.
	MaxFailuresPerHour int32 `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour" json:"max_failures_per_hour,omitempty"`
}

Job scheduling options.

**Beta Feature**: These options are available for testing purposes only. They may be changed before final release.
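
A minimal sketch of attaching scheduling options to a job, assuming the package is imported as dataproc and that the Job message carries a Scheduling field as implied by GetScheduling above:

	job := &dataproc.Job{
		Scheduling: &dataproc.JobScheduling{
			// Allow up to 5 driver restarts per hour before the job is
			// reported failed (the documented maximum is 10).
			MaxFailuresPerHour: 5,
		},
	}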

func (*JobScheduling) Descriptor

func (*JobScheduling) Descriptor() ([]byte, []int)

func (*JobScheduling) GetMaxFailuresPerHour

func (m *JobScheduling) GetMaxFailuresPerHour() int32

func (*JobScheduling) ProtoMessage

func (*JobScheduling) ProtoMessage()

func (*JobScheduling) Reset

func (m *JobScheduling) Reset()

func (*JobScheduling) String

func (m *JobScheduling) String() string

type JobStatus

type JobStatus struct {
	// Output-only. A state message specifying the overall job state.
	State JobStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.JobStatus_State" json:"state,omitempty"`
	// Output-only. Optional job state details, such as an error
	// description if the state is <code>ERROR</code>.
	Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"`
	// Output-only. The time when this state was entered.
	StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"`
	// Output-only. Additional state information, which includes
	// status reported by the agent.
	Substate JobStatus_Substate `protobuf:"varint,7,opt,name=substate,enum=google.cloud.dataproc.v1.JobStatus_Substate" json:"substate,omitempty"`
}

Cloud Dataproc job status.

func (*JobStatus) Descriptor

func (*JobStatus) Descriptor() ([]byte, []int)

func (*JobStatus) GetDetails

func (m *JobStatus) GetDetails() string

func (*JobStatus) GetState

func (m *JobStatus) GetState() JobStatus_State

func (*JobStatus) GetStateStartTime

func (m *JobStatus) GetStateStartTime() *google_protobuf3.Timestamp

func (*JobStatus) GetSubstate

func (m *JobStatus) GetSubstate() JobStatus_Substate

func (*JobStatus) ProtoMessage

func (*JobStatus) ProtoMessage()

func (*JobStatus) Reset

func (m *JobStatus) Reset()

func (*JobStatus) String

func (m *JobStatus) String() string

type JobStatus_State

type JobStatus_State int32

The job state.

const (
	// The job state is unknown.
	JobStatus_STATE_UNSPECIFIED JobStatus_State = 0
	// The job is pending; it has been submitted, but is not yet running.
	JobStatus_PENDING JobStatus_State = 1
	// Job has been received by the service and completed initial setup;
	// it will soon be submitted to the cluster.
	JobStatus_SETUP_DONE JobStatus_State = 8
	// The job is running on the cluster.
	JobStatus_RUNNING JobStatus_State = 2
	// A CancelJob request has been received, but is pending.
	JobStatus_CANCEL_PENDING JobStatus_State = 3
	// Transient in-flight resources have been canceled, and the request to
	// cancel the running job has been issued to the cluster.
	JobStatus_CANCEL_STARTED JobStatus_State = 7
	// The job cancellation was successful.
	JobStatus_CANCELLED JobStatus_State = 4
	// The job has completed successfully.
	JobStatus_DONE JobStatus_State = 5
	// The job has completed, but encountered an error.
	JobStatus_ERROR JobStatus_State = 6
	// Job attempt has failed. The detail field contains failure details for
	// this attempt.
	//
	// Applies to restartable jobs only.
	JobStatus_ATTEMPT_FAILURE JobStatus_State = 9
)
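
DONE, ERROR, and CANCELLED are the terminal states, so a caller typically polls GetJob until one of them is reached. A hypothetical polling loop, assuming a JobControllerClient named client, a context ctx, and a jobId from an earlier submission; the GetJobRequest field names mirror the other request messages in this package, and the poll interval is arbitrary:

	for {
		job, err := client.GetJob(ctx, &dataproc.GetJobRequest{
			ProjectId: "my-project",  // placeholder
			Region:    "us-central1", // placeholder
			JobId:     jobId,
		})
		if err != nil {
			log.Fatal(err)
		}
		state := job.GetStatus().GetState()
		if state == dataproc.JobStatus_DONE ||
			state == dataproc.JobStatus_ERROR ||
			state == dataproc.JobStatus_CANCELLED {
			log.Printf("job finished in state %s", state)
			break
		}
		time.Sleep(10 * time.Second) // arbitrary poll interval
	}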

func (JobStatus_State) EnumDescriptor

func (JobStatus_State) EnumDescriptor() ([]byte, []int)

func (JobStatus_State) String

func (x JobStatus_State) String() string

type JobStatus_Substate

type JobStatus_Substate int32

The job substate.

const (
	// The job substate is unknown.
	JobStatus_UNSPECIFIED JobStatus_Substate = 0
	// The Job is submitted to the agent.
	//
	// Applies to RUNNING state.
	JobStatus_SUBMITTED JobStatus_Substate = 1
	// The Job has been received and is awaiting execution (it may be waiting
	// for a condition to be met). See the "details" field for the reason for
	// the delay.
	//
	// Applies to RUNNING state.
	JobStatus_QUEUED JobStatus_Substate = 2
	// The agent-reported status is out of date, which may be caused by a
	// loss of communication between the agent and Cloud Dataproc. If the
	// agent does not send a timely update, the job will fail.
	//
	// Applies to RUNNING state.
	JobStatus_STALE_STATUS JobStatus_Substate = 3
)

func (JobStatus_Substate) EnumDescriptor

func (JobStatus_Substate) EnumDescriptor() ([]byte, []int)

func (JobStatus_Substate) String

func (x JobStatus_Substate) String() string

type Job_HadoopJob

type Job_HadoopJob struct {
	HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,oneof"`
}

type Job_HiveJob

type Job_HiveJob struct {
	HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,oneof"`
}

type Job_PigJob

type Job_PigJob struct {
	PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,oneof"`
}

type Job_PysparkJob

type Job_PysparkJob struct {
	PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,oneof"`
}

type Job_SparkJob

type Job_SparkJob struct {
	SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,oneof"`
}

type Job_SparkSqlJob

type Job_SparkSqlJob struct {
	SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,oneof"`
}

type ListClustersRequest

type ListClustersRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,4,opt,name=region" json:"region,omitempty"`
	// Optional. A filter constraining the clusters to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// field = value [AND [field = value]] ...
	//
	// where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
	// and `[KEY]` is a label key. **value** can be `*` to match all values.
	// `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
	// `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
	// contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
	// contains the `DELETING` and `ERROR` states.
	// `clusterName` is the name of the cluster provided at creation time.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND clusterName = mycluster
	// AND labels.env = staging AND labels.starred = *
	Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"`
	// Optional. The standard List page size.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
	// Optional. The standard List page token.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
}

A request to list the clusters in a project.
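
A minimal sketch of building a filtered request, assuming the package is imported as dataproc; the project, region, and label values are placeholders:

	req := &dataproc.ListClustersRequest{
		ProjectId: "my-project",  // placeholder
		Region:    "us-central1", // placeholder
		// ACTIVE covers the CREATING, UPDATING, and RUNNING states.
		Filter:   "status.state = ACTIVE AND labels.env = staging",
		PageSize: 50,
	}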

func (*ListClustersRequest) Descriptor

func (*ListClustersRequest) Descriptor() ([]byte, []int)

func (*ListClustersRequest) GetFilter

func (m *ListClustersRequest) GetFilter() string

func (*ListClustersRequest) GetPageSize

func (m *ListClustersRequest) GetPageSize() int32

func (*ListClustersRequest) GetPageToken

func (m *ListClustersRequest) GetPageToken() string

func (*ListClustersRequest) GetProjectId

func (m *ListClustersRequest) GetProjectId() string

func (*ListClustersRequest) GetRegion

func (m *ListClustersRequest) GetRegion() string

func (*ListClustersRequest) ProtoMessage

func (*ListClustersRequest) ProtoMessage()

func (*ListClustersRequest) Reset

func (m *ListClustersRequest) Reset()

func (*ListClustersRequest) String

func (m *ListClustersRequest) String() string

type ListClustersResponse

type ListClustersResponse struct {
	// Output-only. The clusters in the project.
	Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"`
	// Output-only. This token is included in the response if there are more
	// results to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent `ListClustersRequest`.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}

The list of all clusters in a project.

func (*ListClustersResponse) Descriptor

func (*ListClustersResponse) Descriptor() ([]byte, []int)

func (*ListClustersResponse) GetClusters

func (m *ListClustersResponse) GetClusters() []*Cluster

func (*ListClustersResponse) GetNextPageToken

func (m *ListClustersResponse) GetNextPageToken() string

func (*ListClustersResponse) ProtoMessage

func (*ListClustersResponse) ProtoMessage()

func (*ListClustersResponse) Reset

func (m *ListClustersResponse) Reset()

func (*ListClustersResponse) String

func (m *ListClustersResponse) String() string

type ListJobsRequest

type ListJobsRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,6,opt,name=region" json:"region,omitempty"`
	// Optional. The number of results to return in each response.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
	// Optional. If set, the returned jobs list includes only jobs that were
	// submitted to the named cluster.
	ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Optional. Specifies enumerated categories of jobs to list.
	// (default = match ALL jobs).
	//
	// If `filter` is provided, `jobStateMatcher` will be ignored.
	JobStateMatcher ListJobsRequest_JobStateMatcher `` /* 164-byte string literal not displayed */
	// Optional. A filter constraining the jobs to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// [field = value] AND [field [= value]] ...
	//
	// where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
	// key. **value** can be `*` to match all values.
	// `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND labels.env = staging AND labels.starred = *
	Filter string `protobuf:"bytes,7,opt,name=filter" json:"filter,omitempty"`
}

A request to list jobs in a project.
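
A minimal sketch of a request that lists only non-terminal jobs on one cluster, assuming the package is imported as dataproc; the project, region, and cluster values are placeholders:

	req := &dataproc.ListJobsRequest{
		ProjectId:   "my-project",  // placeholder
		Region:      "us-central1", // placeholder
		ClusterName: "my-cluster",  // only jobs submitted to this cluster
		// ACTIVE matches PENDING, RUNNING, and CANCEL_PENDING jobs.
		JobStateMatcher: dataproc.ListJobsRequest_ACTIVE,
	}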

func (*ListJobsRequest) Descriptor

func (*ListJobsRequest) Descriptor() ([]byte, []int)

func (*ListJobsRequest) GetClusterName

func (m *ListJobsRequest) GetClusterName() string

func (*ListJobsRequest) GetFilter

func (m *ListJobsRequest) GetFilter() string

func (*ListJobsRequest) GetJobStateMatcher

func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher

func (*ListJobsRequest) GetPageSize

func (m *ListJobsRequest) GetPageSize() int32

func (*ListJobsRequest) GetPageToken

func (m *ListJobsRequest) GetPageToken() string

func (*ListJobsRequest) GetProjectId

func (m *ListJobsRequest) GetProjectId() string

func (*ListJobsRequest) GetRegion

func (m *ListJobsRequest) GetRegion() string

func (*ListJobsRequest) ProtoMessage

func (*ListJobsRequest) ProtoMessage()

func (*ListJobsRequest) Reset

func (m *ListJobsRequest) Reset()

func (*ListJobsRequest) String

func (m *ListJobsRequest) String() string

type ListJobsRequest_JobStateMatcher

type ListJobsRequest_JobStateMatcher int32

A matcher that specifies categories of job states.

const (
	// Match all jobs, regardless of state.
	ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0
	// Only match jobs in non-terminal states: PENDING, RUNNING, or
	// CANCEL_PENDING.
	ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1
	// Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
	ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2
)

func (ListJobsRequest_JobStateMatcher) EnumDescriptor

func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int)

func (ListJobsRequest_JobStateMatcher) String

func (x ListJobsRequest_JobStateMatcher) String() string

type ListJobsResponse

type ListJobsResponse struct {
	// Output-only. Jobs list.
	Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"`
	// Optional. This token is included in the response if there are more results
	// to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent <code>ListJobsRequest</code>.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}

A list of jobs in a project.
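
NextPageToken drives pagination: feed it back as PageToken until it comes back empty. A hypothetical loop, assuming a JobControllerClient named client and a context ctx from earlier setup:

	req := &dataproc.ListJobsRequest{
		ProjectId: "my-project",  // placeholder
		Region:    "us-central1", // placeholder
	}
	for {
		resp, err := client.ListJobs(ctx, req)
		if err != nil {
			log.Fatal(err)
		}
		for _, job := range resp.GetJobs() {
			log.Println(job.GetStatus().GetState())
		}
		if resp.GetNextPageToken() == "" {
			break // no further pages
		}
		req.PageToken = resp.GetNextPageToken()
	}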

func (*ListJobsResponse) Descriptor

func (*ListJobsResponse) Descriptor() ([]byte, []int)

func (*ListJobsResponse) GetJobs

func (m *ListJobsResponse) GetJobs() []*Job

func (*ListJobsResponse) GetNextPageToken

func (m *ListJobsResponse) GetNextPageToken() string

func (*ListJobsResponse) ProtoMessage

func (*ListJobsResponse) ProtoMessage()

func (*ListJobsResponse) Reset

func (m *ListJobsResponse) Reset()

func (*ListJobsResponse) String

func (m *ListJobsResponse) String() string

type LoggingConfig

type LoggingConfig struct {
	// The per-package log levels for the driver. This may include
	// "root" package name to configure rootLogger.
	// Examples:
	//   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]LoggingConfig_Level `` /* 226-byte string literal not displayed */
}

The runtime logging config of the job.
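
A minimal sketch of a driver log configuration, assuming the package is imported as dataproc; the package names are examples only:

	logging := &dataproc.LoggingConfig{
		DriverLogLevels: map[string]dataproc.LoggingConfig_Level{
			"root":       dataproc.LoggingConfig_INFO,  // rootLogger
			"org.apache": dataproc.LoggingConfig_DEBUG, // per-package override
		},
	}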

func (*LoggingConfig) Descriptor

func (*LoggingConfig) Descriptor() ([]byte, []int)

func (*LoggingConfig) GetDriverLogLevels

func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level

func (*LoggingConfig) ProtoMessage

func (*LoggingConfig) ProtoMessage()

func (*LoggingConfig) Reset

func (m *LoggingConfig) Reset()

func (*LoggingConfig) String

func (m *LoggingConfig) String() string

type LoggingConfig_Level

type LoggingConfig_Level int32

The Log4j level for job execution. When running an [Apache Hive](http://hive.apache.org/) job, Cloud Dataproc configures the Hive client to an equivalent verbosity level.

const (
	// Level is unspecified. Use default level for log4j.
	LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0
	// Use ALL level for log4j.
	LoggingConfig_ALL LoggingConfig_Level = 1
	// Use TRACE level for log4j.
	LoggingConfig_TRACE LoggingConfig_Level = 2
	// Use DEBUG level for log4j.
	LoggingConfig_DEBUG LoggingConfig_Level = 3
	// Use INFO level for log4j.
	LoggingConfig_INFO LoggingConfig_Level = 4
	// Use WARN level for log4j.
	LoggingConfig_WARN LoggingConfig_Level = 5
	// Use ERROR level for log4j.
	LoggingConfig_ERROR LoggingConfig_Level = 6
	// Use FATAL level for log4j.
	LoggingConfig_FATAL LoggingConfig_Level = 7
	// Turn off log4j.
	LoggingConfig_OFF LoggingConfig_Level = 8
)

func (LoggingConfig_Level) EnumDescriptor

func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int)

func (LoggingConfig_Level) String

func (x LoggingConfig_Level) String() string

type ManagedGroupConfig

type ManagedGroupConfig struct {
	// Output-only. The name of the Instance Template used for the Managed
	// Instance Group.
	InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName" json:"instance_template_name,omitempty"`
	// Output-only. The name of the Instance Group Manager for this group.
	InstanceGroupManagerName string `` /* 130-byte string literal not displayed */
}

Specifies the resources used to actively manage an instance group.

func (*ManagedGroupConfig) Descriptor

func (*ManagedGroupConfig) Descriptor() ([]byte, []int)

func (*ManagedGroupConfig) GetInstanceGroupManagerName

func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string

func (*ManagedGroupConfig) GetInstanceTemplateName

func (m *ManagedGroupConfig) GetInstanceTemplateName() string

func (*ManagedGroupConfig) ProtoMessage

func (*ManagedGroupConfig) ProtoMessage()

func (*ManagedGroupConfig) Reset

func (m *ManagedGroupConfig) Reset()

func (*ManagedGroupConfig) String

func (m *ManagedGroupConfig) String() string

type NodeInitializationAction

type NodeInitializationAction struct {
	// Required. Google Cloud Storage URI of executable file.
	ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile" json:"executable_file,omitempty"`
	// Optional. Amount of time the executable has to complete. The default is
	// 10 minutes. Cluster creation fails with an explanatory error message (the
	// name of the executable that caused the error and the exceeded timeout
	// period) if the executable has not completed by the end of the timeout period.
	ExecutionTimeout *google_protobuf4.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout" json:"execution_timeout,omitempty"`
}

Specifies an executable to run on a fully configured node and a timeout period for executable completion.
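
A minimal sketch of an initialization action with a shortened timeout. The Duration message is the protobuf well-known type that this package aliases as google_protobuf4; the alias, bucket path, and timeout value are illustrative assumptions:

	action := &dataproc.NodeInitializationAction{
		ExecutableFile:   "gs://my-bucket/init/install-deps.sh",    // placeholder GCS URI
		ExecutionTimeout: &google_protobuf4.Duration{Seconds: 300}, // 5 minutes instead of the 10-minute default
	}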

func (*NodeInitializationAction) Descriptor

func (*NodeInitializationAction) Descriptor() ([]byte, []int)

func (*NodeInitializationAction) GetExecutableFile

func (m *NodeInitializationAction) GetExecutableFile() string

func (*NodeInitializationAction) GetExecutionTimeout

func (m *NodeInitializationAction) GetExecutionTimeout() *google_protobuf4.Duration

func (*NodeInitializationAction) ProtoMessage

func (*NodeInitializationAction) ProtoMessage()

func (*NodeInitializationAction) Reset

func (m *NodeInitializationAction) Reset()

func (*NodeInitializationAction) String

func (m *NodeInitializationAction) String() string

type PigJob

type PigJob struct {
	// Required. The sequence of Pig queries to execute, specified as an HCFS
	// file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*PigJob_QueryFileUri
	//	*PigJob_QueryList
	Queries isPigJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when executing
	// independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the Pig
	// command: `name=[value]`).
	ScriptVariables map[string]string `` /* 173-byte string literal not displayed */
	// Optional. A mapping of property names to values, used to configure Pig.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/pig/conf/pig.properties, and classes in user code.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of
	// the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}

A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN.
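
A minimal sketch of an inline Pig job, assuming the package is imported as dataproc; the queries and script variable are placeholders:

	pig := &dataproc.PigJob{
		Queries: &dataproc.PigJob_QueryList{QueryList: &dataproc.QueryList{
			Queries: []string{
				"records = LOAD '$input' AS (line:chararray);",
				"DUMP records;",
			},
		}},
		ScriptVariables:   map[string]string{"input": "gs://my-bucket/data"}, // equivalent to name=[value]
		ContinueOnFailure: true,
	}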

func (*PigJob) Descriptor

func (*PigJob) Descriptor() ([]byte, []int)

func (*PigJob) GetContinueOnFailure

func (m *PigJob) GetContinueOnFailure() bool

func (*PigJob) GetJarFileUris

func (m *PigJob) GetJarFileUris() []string

func (*PigJob) GetLoggingConfig

func (m *PigJob) GetLoggingConfig() *LoggingConfig

func (*PigJob) GetProperties

func (m *PigJob) GetProperties() map[string]string

func (*PigJob) GetQueries

func (m *PigJob) GetQueries() isPigJob_Queries

func (*PigJob) GetQueryFileUri

func (m *PigJob) GetQueryFileUri() string

func (*PigJob) GetQueryList

func (m *PigJob) GetQueryList() *QueryList

func (*PigJob) GetScriptVariables

func (m *PigJob) GetScriptVariables() map[string]string

func (*PigJob) ProtoMessage

func (*PigJob) ProtoMessage()

func (*PigJob) Reset

func (m *PigJob) Reset()

func (*PigJob) String

func (m *PigJob) String() string

func (*PigJob) XXX_OneofFuncs

func (*PigJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type PigJob_QueryFileUri

type PigJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}

type PigJob_QueryList

type PigJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}

type PySparkJob

type PySparkJob struct {
	// Required. The HCFS URI of the main Python file to use as the driver. Must
	// be a .py file.
	MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri" json:"main_python_file_uri,omitempty"`
	// Optional. The arguments to pass to the driver.  Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,2,rep,name=args" json:"args,omitempty"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark
	// framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris" json:"python_file_uris,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Python driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Python drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory
	// of Python drivers and distributed tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure PySpark.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}

A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.
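
A minimal sketch, assuming the package is imported as dataproc; the GCS URIs, arguments, and Spark property value are placeholders:

	pyspark := &dataproc.PySparkJob{
		MainPythonFileUri: "gs://my-bucket/jobs/word_count.py", // must be a .py file
		Args:              []string{"gs://my-bucket/input", "gs://my-bucket/output"},
		PythonFileUris:    []string{"gs://my-bucket/jobs/helpers.zip"},
		Properties:        map[string]string{"spark.executor.memory": "4g"},
	}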

func (*PySparkJob) Descriptor

func (*PySparkJob) Descriptor() ([]byte, []int)

func (*PySparkJob) GetArchiveUris

func (m *PySparkJob) GetArchiveUris() []string

func (*PySparkJob) GetArgs

func (m *PySparkJob) GetArgs() []string

func (*PySparkJob) GetFileUris

func (m *PySparkJob) GetFileUris() []string

func (*PySparkJob) GetJarFileUris

func (m *PySparkJob) GetJarFileUris() []string

func (*PySparkJob) GetLoggingConfig

func (m *PySparkJob) GetLoggingConfig() *LoggingConfig

func (*PySparkJob) GetMainPythonFileUri

func (m *PySparkJob) GetMainPythonFileUri() string

func (*PySparkJob) GetProperties

func (m *PySparkJob) GetProperties() map[string]string

func (*PySparkJob) GetPythonFileUris

func (m *PySparkJob) GetPythonFileUris() []string

func (*PySparkJob) ProtoMessage

func (*PySparkJob) ProtoMessage()

func (*PySparkJob) Reset

func (m *PySparkJob) Reset()

func (*PySparkJob) String

func (m *PySparkJob) String() string

type QueryList

type QueryList struct {
	// Required. The queries to execute. You do not need to terminate a query
	// with a semicolon. Multiple queries can be specified in one string
	// by separating each with a semicolon. Here is an example of a Cloud
	// Dataproc API snippet that uses a QueryList to specify a HiveJob:
	//
	//     "hiveJob": {
	//       "queryList": {
	//         "queries": [
	//           "query1",
	//           "query2",
	//           "query3;query4",
	//         ]
	//       }
	//     }
	Queries []string `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"`
}

A list of queries to run on a cluster.
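
The Go equivalent of the JSON snippet above is simply a slice of query strings, assuming the package is imported as dataproc:

	queries := &dataproc.QueryList{
		Queries: []string{"query1", "query2", "query3;query4"},
	}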

func (*QueryList) Descriptor

func (*QueryList) Descriptor() ([]byte, []int)

func (*QueryList) GetQueries

func (m *QueryList) GetQueries() []string

func (*QueryList) ProtoMessage

func (*QueryList) ProtoMessage()

func (*QueryList) Reset

func (m *QueryList) Reset()

func (*QueryList) String

func (m *QueryList) String() string

type SoftwareConfig

type SoftwareConfig struct {
	// Optional. The version of software inside the cluster. It must match the
	// regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the
	// latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)).
	ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion" json:"image_version,omitempty"`
	// Optional. The properties to set on daemon config files.
	//
	// Property keys are specified in `prefix:property` format, such as
	// `core:fs.defaultFS`. The following are supported prefixes
	// and their mappings:
	//
	// * capacity-scheduler: `capacity-scheduler.xml`
	// * core:   `core-site.xml`
	// * distcp: `distcp-default.xml`
	// * hdfs:   `hdfs-site.xml`
	// * hive:   `hive-site.xml`
	// * mapred: `mapred-site.xml`
	// * pig:    `pig.properties`
	// * spark:  `spark-defaults.conf`
	// * yarn:   `yarn-site.xml`
	//
	// For more information, see
	// [Cluster properties](/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `` /* 140-byte string literal not displayed */
}

Specifies the selection and config of software inside the cluster.
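
A minimal sketch of overriding daemon properties with the prefix:property key format, assuming the package is imported as dataproc; the image version and property values are placeholders:

	software := &dataproc.SoftwareConfig{
		ImageVersion: "1.5", // must match [0-9]+\.[0-9]+
		Properties: map[string]string{
			"hdfs:dfs.replication":        "2",  // written to hdfs-site.xml
			"spark:spark.executor.memory": "4g", // written to spark-defaults.conf
		},
	}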

func (*SoftwareConfig) Descriptor

func (*SoftwareConfig) Descriptor() ([]byte, []int)

func (*SoftwareConfig) GetImageVersion

func (m *SoftwareConfig) GetImageVersion() string

func (*SoftwareConfig) GetProperties

func (m *SoftwareConfig) GetProperties() map[string]string

func (*SoftwareConfig) ProtoMessage

func (*SoftwareConfig) ProtoMessage()

func (*SoftwareConfig) Reset

func (m *SoftwareConfig) Reset()

func (*SoftwareConfig) String

func (m *SoftwareConfig) String() string

type SparkJob

type SparkJob struct {
	// Required. The specification of the main method to call to drive the job.
	// Specify either the jar file that contains the main class or the main class
	// name. To pass both a main jar and a main class in that jar, add the jar to
	// `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
	//
	// Types that are valid to be assigned to Driver:
	//	*SparkJob_MainJarFileUri
	//	*SparkJob_MainClass
	Driver isSparkJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Spark driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Spark drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory
	// of Spark drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Spark.
	// Properties that conflict with values set by the Cloud Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}

A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN.
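
A minimal sketch that names the driver by its main jar rather than a main class, assuming the package is imported as dataproc; the URIs are placeholders:

	spark := &dataproc.SparkJob{
		Driver: &dataproc.SparkJob_MainJarFileUri{
			MainJarFileUri: "gs://my-bucket/jars/word-count.jar",
		},
		Args:        []string{"gs://my-bucket/input", "gs://my-bucket/output"},
		JarFileUris: []string{"gs://my-bucket/jars/extra-lib.jar"},
	}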

func (*SparkJob) Descriptor

func (*SparkJob) Descriptor() ([]byte, []int)

func (*SparkJob) GetArchiveUris

func (m *SparkJob) GetArchiveUris() []string

func (*SparkJob) GetArgs

func (m *SparkJob) GetArgs() []string

func (*SparkJob) GetDriver

func (m *SparkJob) GetDriver() isSparkJob_Driver

func (*SparkJob) GetFileUris

func (m *SparkJob) GetFileUris() []string

func (*SparkJob) GetJarFileUris

func (m *SparkJob) GetJarFileUris() []string

func (*SparkJob) GetLoggingConfig

func (m *SparkJob) GetLoggingConfig() *LoggingConfig

func (*SparkJob) GetMainClass

func (m *SparkJob) GetMainClass() string

func (*SparkJob) GetMainJarFileUri

func (m *SparkJob) GetMainJarFileUri() string

func (*SparkJob) GetProperties

func (m *SparkJob) GetProperties() map[string]string

func (*SparkJob) ProtoMessage

func (*SparkJob) ProtoMessage()

func (*SparkJob) Reset

func (m *SparkJob) Reset()

func (*SparkJob) String

func (m *SparkJob) String() string

func (*SparkJob) XXX_OneofFuncs

func (*SparkJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type SparkJob_MainClass

type SparkJob_MainClass struct {
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"`
}

type SparkJob_MainJarFileUri

type SparkJob_MainJarFileUri struct {
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"`
}

type SparkSqlJob

type SparkSqlJob struct {
	// Required. The sequence of Spark SQL queries to execute, specified as
	// either an HCFS file URI or as a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*SparkSqlJob_QueryFileUri
	//	*SparkSqlJob_QueryList
	Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Spark SQL command: SET `name="value";`).
	ScriptVariables map[string]string `` /* 173-byte string literal not displayed */
	// Optional. A mapping of property names to values, used to configure
	// Spark SQL's SparkConf. Properties that conflict with values set by the
	// Cloud Dataproc API may be overwritten.
	Properties map[string]string `` /* 140-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"`
}

A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries.
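
A minimal sketch of an inline Spark SQL job with a script variable, assuming the package is imported as dataproc; the queries and variable value are placeholders:

	sparkSql := &dataproc.SparkSqlJob{
		Queries: &dataproc.SparkSqlJob_QueryList{QueryList: &dataproc.QueryList{
			Queries: []string{"SHOW DATABASES", "SELECT COUNT(*) FROM events"},
		}},
		ScriptVariables: map[string]string{"day": "2020-11-02"}, // equivalent to SET day="2020-11-02";
	}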

func (*SparkSqlJob) Descriptor

func (*SparkSqlJob) Descriptor() ([]byte, []int)

func (*SparkSqlJob) GetJarFileUris

func (m *SparkSqlJob) GetJarFileUris() []string

func (*SparkSqlJob) GetLoggingConfig

func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig

func (*SparkSqlJob) GetProperties

func (m *SparkSqlJob) GetProperties() map[string]string

func (*SparkSqlJob) GetQueries

func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries

func (*SparkSqlJob) GetQueryFileUri

func (m *SparkSqlJob) GetQueryFileUri() string

func (*SparkSqlJob) GetQueryList

func (m *SparkSqlJob) GetQueryList() *QueryList

func (*SparkSqlJob) GetScriptVariables

func (m *SparkSqlJob) GetScriptVariables() map[string]string

func (*SparkSqlJob) ProtoMessage

func (*SparkSqlJob) ProtoMessage()

func (*SparkSqlJob) Reset

func (m *SparkSqlJob) Reset()

func (*SparkSqlJob) String

func (m *SparkSqlJob) String() string

func (*SparkSqlJob) XXX_OneofFuncs

func (*SparkSqlJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type SparkSqlJob_QueryFileUri

type SparkSqlJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"`
}

type SparkSqlJob_QueryList

type SparkSqlJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"`
}

type SubmitJobRequest

type SubmitJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"`
	// Required. The job resource.
	Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"`
}

A request to submit a job.

func (*SubmitJobRequest) Descriptor

func (*SubmitJobRequest) Descriptor() ([]byte, []int)

func (*SubmitJobRequest) GetJob

func (m *SubmitJobRequest) GetJob() *Job

func (*SubmitJobRequest) GetProjectId

func (m *SubmitJobRequest) GetProjectId() string

func (*SubmitJobRequest) GetRegion

func (m *SubmitJobRequest) GetRegion() string

func (*SubmitJobRequest) ProtoMessage

func (*SubmitJobRequest) ProtoMessage()

func (*SubmitJobRequest) Reset

func (m *SubmitJobRequest) Reset()

func (*SubmitJobRequest) String

func (m *SubmitJobRequest) String() string

type UpdateClusterRequest

type UpdateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project the
	// cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,5,opt,name=region" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"`
	// Required. The changes to the cluster.
	Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"`
	// Required. Specifies the path, relative to `Cluster`, of
	// the field to update. For example, to change the number of workers
	// in a cluster to 5, the `update_mask` parameter would be
	// specified as `config.worker_config.num_instances`,
	// and the `PATCH` request body would specify the new value, as follows:
	//
	//     {
	//       "config":{
	//         "workerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	// Similarly, to change the number of preemptible workers in a cluster to 5,
	// the `update_mask` parameter would be
	// `config.secondary_worker_config.num_instances`, and the `PATCH` request
	// body would be set as follows:
	//
	//     {
	//       "config":{
	//         "secondaryWorkerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	// Note: Currently, only the following fields can be updated:
	//
	//   Mask                                           Purpose
	//   labels                                         Update labels
	//   config.worker_config.num_instances             Resize primary worker group
	//   config.secondary_worker_config.num_instances   Resize secondary worker group
	UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
}

A request to update a cluster.
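
A minimal sketch of resizing the primary worker group to 5 instances. The FieldMask message is the protobuf well-known type that this package aliases as google_protobuf5, and the Cluster, ClusterConfig, and InstanceGroupConfig field names follow the messages generated earlier in this package; treat the alias and values as illustrative assumptions:

	req := &dataproc.UpdateClusterRequest{
		ProjectId:   "my-project",  // placeholder
		Region:      "us-central1", // placeholder
		ClusterName: "my-cluster",  // placeholder
		Cluster: &dataproc.Cluster{
			Config: &dataproc.ClusterConfig{
				WorkerConfig: &dataproc.InstanceGroupConfig{NumInstances: 5},
			},
		},
		UpdateMask: &google_protobuf5.FieldMask{
			Paths: []string{"config.worker_config.num_instances"},
		},
	}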

func (*UpdateClusterRequest) Descriptor

func (*UpdateClusterRequest) Descriptor() ([]byte, []int)

func (*UpdateClusterRequest) GetCluster

func (m *UpdateClusterRequest) GetCluster() *Cluster

func (*UpdateClusterRequest) GetClusterName

func (m *UpdateClusterRequest) GetClusterName() string

func (*UpdateClusterRequest) GetProjectId

func (m *UpdateClusterRequest) GetProjectId() string

func (*UpdateClusterRequest) GetRegion

func (m *UpdateClusterRequest) GetRegion() string

func (*UpdateClusterRequest) GetUpdateMask

func (m *UpdateClusterRequest) GetUpdateMask() *google_protobuf5.FieldMask

func (*UpdateClusterRequest) ProtoMessage

func (*UpdateClusterRequest) ProtoMessage()

func (*UpdateClusterRequest) Reset

func (m *UpdateClusterRequest) Reset()

func (*UpdateClusterRequest) String

func (m *UpdateClusterRequest) String() string

type UpdateJobRequest

type UpdateJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId" json:"job_id,omitempty"`
	// Required. The changes to the job.
	Job *Job `protobuf:"bytes,4,opt,name=job" json:"job,omitempty"`
	// Required. Specifies the path, relative to <code>Job</code>, of
	// the field to update. For example, to update the labels of a Job the
	// <code>update_mask</code> parameter would be specified as
	// <code>labels</code>, and the `PATCH` request body would specify the new
	// value. <strong>Note:</strong> Currently, <code>labels</code> is the only
	// field that can be updated.
	UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
}

A request to update a job.
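
Since labels is currently the only updatable field, an update request is a job carrying the new labels plus a mask naming that one path. A minimal sketch, assuming the package is imported as dataproc, that the Job message's Labels field is as generated earlier in this package, and that google_protobuf5 is this package's alias for the protobuf FieldMask type:

	req := &dataproc.UpdateJobRequest{
		ProjectId: "my-project",  // placeholder
		Region:    "us-central1", // placeholder
		JobId:     "nightly-etl-2020-11-02",
		Job: &dataproc.Job{
			Labels: map[string]string{"team": "analytics"}, // new label values
		},
		UpdateMask: &google_protobuf5.FieldMask{Paths: []string{"labels"}},
	}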

func (*UpdateJobRequest) Descriptor

func (*UpdateJobRequest) Descriptor() ([]byte, []int)

func (*UpdateJobRequest) GetJob

func (m *UpdateJobRequest) GetJob() *Job

func (*UpdateJobRequest) GetJobId

func (m *UpdateJobRequest) GetJobId() string

func (*UpdateJobRequest) GetProjectId

func (m *UpdateJobRequest) GetProjectId() string

func (*UpdateJobRequest) GetRegion

func (m *UpdateJobRequest) GetRegion() string

func (*UpdateJobRequest) GetUpdateMask

func (m *UpdateJobRequest) GetUpdateMask() *google_protobuf5.FieldMask

func (*UpdateJobRequest) ProtoMessage

func (*UpdateJobRequest) ProtoMessage()

func (*UpdateJobRequest) Reset

func (m *UpdateJobRequest) Reset()

func (*UpdateJobRequest) String

func (m *UpdateJobRequest) String() string

type YarnApplication

type YarnApplication struct {
	// Required. The application name.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// Required. The application state.
	State YarnApplication_State `protobuf:"varint,2,opt,name=state,enum=google.cloud.dataproc.v1.YarnApplication_State" json:"state,omitempty"`
	// Required. The numerical progress of the application, from 1 to 100.
	Progress float32 `protobuf:"fixed32,3,opt,name=progress" json:"progress,omitempty"`
	// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
	// TimelineServer that provides application-specific information. The URL uses
	// the internal hostname, and requires a proxy server for resolution and,
	// possibly, access.
	TrackingUrl string `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl" json:"tracking_url,omitempty"`
}

A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.

**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.

func (*YarnApplication) Descriptor

func (*YarnApplication) Descriptor() ([]byte, []int)

func (*YarnApplication) GetName

func (m *YarnApplication) GetName() string

func (*YarnApplication) GetProgress

func (m *YarnApplication) GetProgress() float32

func (*YarnApplication) GetState

func (m *YarnApplication) GetState() YarnApplication_State

func (*YarnApplication) GetTrackingUrl

func (m *YarnApplication) GetTrackingUrl() string

func (*YarnApplication) ProtoMessage

func (*YarnApplication) ProtoMessage()

func (*YarnApplication) Reset

func (m *YarnApplication) Reset()

func (*YarnApplication) String

func (m *YarnApplication) String() string

type YarnApplication_State

type YarnApplication_State int32

The application state, corresponding to <code>YarnProtos.YarnApplicationStateProto</code>.

const (
	// Status is unspecified.
	YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0
	// Status is NEW.
	YarnApplication_NEW YarnApplication_State = 1
	// Status is NEW_SAVING.
	YarnApplication_NEW_SAVING YarnApplication_State = 2
	// Status is SUBMITTED.
	YarnApplication_SUBMITTED YarnApplication_State = 3
	// Status is ACCEPTED.
	YarnApplication_ACCEPTED YarnApplication_State = 4
	// Status is RUNNING.
	YarnApplication_RUNNING YarnApplication_State = 5
	// Status is FINISHED.
	YarnApplication_FINISHED YarnApplication_State = 6
	// Status is FAILED.
	YarnApplication_FAILED YarnApplication_State = 7
	// Status is KILLED.
	YarnApplication_KILLED YarnApplication_State = 8
)

func (YarnApplication_State) EnumDescriptor

func (YarnApplication_State) EnumDescriptor() ([]byte, []int)

func (YarnApplication_State) String

func (x YarnApplication_State) String() string
