package dataproc

v2.13.0
Published: Apr 13, 2020 License: Apache-2.0 Imports: 4 Imported by: 0

Documentation

Overview

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AutoscalingPolicy

type AutoscalingPolicy struct {
	pulumi.CustomResourceState

	// Basic algorithm for autoscaling.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrOutput `pulumi:"basicAlgorithm"`
	// The location where the autoscaling policy should reside. The default value is 'global'.
	Location pulumi.StringPtrOutput `pulumi:"location"`
	// The "resource name" of the autoscaling policy.
	Name pulumi.StringOutput `pulumi:"name"`
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot
	// begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	PolicyId pulumi.StringOutput `pulumi:"policyId"`
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// Describes how the autoscaler will operate for secondary workers.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrOutput `pulumi:"secondaryWorkerConfig"`
	// Describes how the autoscaler will operate for primary workers.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrOutput `pulumi:"workerConfig"`
}

Describes an autoscaling policy for the Dataproc cluster autoscaler.

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_autoscaling_policy.html.markdown.

func GetAutoscalingPolicy

func GetAutoscalingPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *AutoscalingPolicyState, opts ...pulumi.ResourceOption) (*AutoscalingPolicy, error)

GetAutoscalingPolicy gets an existing AutoscalingPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
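
A minimal sketch of such a lookup, assuming the pulumi-gcp v2 module layout for the import paths (adjust them to your go.mod) and an illustrative policy ID:

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// The ID below is illustrative; pass the identifier of your existing policy.
		policy, err := dataproc.GetAutoscalingPolicy(ctx, "existing",
			pulumi.ID("projects/my-project/locations/global/autoscalingPolicies/my-policy"), nil)
		if err != nil {
			return err
		}
		ctx.Export("policyName", policy.Name)
		return nil
	})
}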

func NewAutoscalingPolicy

func NewAutoscalingPolicy(ctx *pulumi.Context,
	name string, args *AutoscalingPolicyArgs, opts ...pulumi.ResourceOption) (*AutoscalingPolicy, error)

NewAutoscalingPolicy registers a new resource with the given unique name, arguments, and options.
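
A sketch of registering a policy, again assuming the pulumi-gcp v2 import paths; the policy ID, location, and scaling values are illustrative:

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// policyId must be 3-50 characters of letters, digits, underscores and hyphens.
		_, err := dataproc.NewAutoscalingPolicy(ctx, "asp", &dataproc.AutoscalingPolicyArgs{
			PolicyId: pulumi.String("dataproc-policy"),
			Location: pulumi.String("us-central1"),
			BasicAlgorithm: &dataproc.AutoscalingPolicyBasicAlgorithmArgs{
				YarnConfig: &dataproc.AutoscalingPolicyBasicAlgorithmYarnConfigArgs{
					GracefulDecommissionTimeout: pulumi.String("30s"),
					ScaleUpFactor:               pulumi.Float64(0.5),
					ScaleDownFactor:             pulumi.Float64(0.5),
				},
			},
			WorkerConfig: &dataproc.AutoscalingPolicyWorkerConfigArgs{
				MaxInstances: pulumi.Int(3),
			},
		})
		return err
	})
}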

type AutoscalingPolicyArgs

type AutoscalingPolicyArgs struct {
	// Basic algorithm for autoscaling.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrInput
	// The location where the autoscaling policy should reside. The default value is 'global'.
	Location pulumi.StringPtrInput
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot
	// begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	PolicyId pulumi.StringInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// Describes how the autoscaler will operate for secondary workers.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrInput
	// Describes how the autoscaler will operate for primary workers.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrInput
}

The set of arguments for constructing an AutoscalingPolicy resource.

func (AutoscalingPolicyArgs) ElementType

func (AutoscalingPolicyArgs) ElementType() reflect.Type

type AutoscalingPolicyBasicAlgorithm

type AutoscalingPolicyBasicAlgorithm struct {
	CooldownPeriod *string                                   `pulumi:"cooldownPeriod"`
	YarnConfig     AutoscalingPolicyBasicAlgorithmYarnConfig `pulumi:"yarnConfig"`
}

type AutoscalingPolicyBasicAlgorithmArgs

type AutoscalingPolicyBasicAlgorithmArgs struct {
	CooldownPeriod pulumi.StringPtrInput                          `pulumi:"cooldownPeriod"`
	YarnConfig     AutoscalingPolicyBasicAlgorithmYarnConfigInput `pulumi:"yarnConfig"`
}

func (AutoscalingPolicyBasicAlgorithmArgs) ElementType

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutput

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

type AutoscalingPolicyBasicAlgorithmInput

type AutoscalingPolicyBasicAlgorithmInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput
	ToAutoscalingPolicyBasicAlgorithmOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmOutput
}

type AutoscalingPolicyBasicAlgorithmOutput

type AutoscalingPolicyBasicAlgorithmOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmOutput) CooldownPeriod

func (AutoscalingPolicyBasicAlgorithmOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutput

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmOutput) YarnConfig

type AutoscalingPolicyBasicAlgorithmPtrInput

type AutoscalingPolicyBasicAlgorithmPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput
	ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput
}

type AutoscalingPolicyBasicAlgorithmPtrOutput

type AutoscalingPolicyBasicAlgorithmPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmPtrOutput) CooldownPeriod

func (AutoscalingPolicyBasicAlgorithmPtrOutput) Elem

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (o AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmPtrOutput) YarnConfig

type AutoscalingPolicyBasicAlgorithmYarnConfig

type AutoscalingPolicyBasicAlgorithmYarnConfig struct {
	GracefulDecommissionTimeout string   `pulumi:"gracefulDecommissionTimeout"`
	ScaleDownFactor             float64  `pulumi:"scaleDownFactor"`
	ScaleDownMinWorkerFraction  *float64 `pulumi:"scaleDownMinWorkerFraction"`
	ScaleUpFactor               float64  `pulumi:"scaleUpFactor"`
	ScaleUpMinWorkerFraction    *float64 `pulumi:"scaleUpMinWorkerFraction"`
}

type AutoscalingPolicyBasicAlgorithmYarnConfigArgs

type AutoscalingPolicyBasicAlgorithmYarnConfigArgs struct {
	GracefulDecommissionTimeout pulumi.StringInput     `pulumi:"gracefulDecommissionTimeout"`
	ScaleDownFactor             pulumi.Float64Input    `pulumi:"scaleDownFactor"`
	ScaleDownMinWorkerFraction  pulumi.Float64PtrInput `pulumi:"scaleDownMinWorkerFraction"`
	ScaleUpFactor               pulumi.Float64Input    `pulumi:"scaleUpFactor"`
	ScaleUpMinWorkerFraction    pulumi.Float64PtrInput `pulumi:"scaleUpMinWorkerFraction"`
}

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ElementType

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigInput

type AutoscalingPolicyBasicAlgorithmYarnConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput
	ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput
}

type AutoscalingPolicyBasicAlgorithmYarnConfigOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) GracefulDecommissionTimeout

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleDownFactor

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleDownMinWorkerFraction

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleUpFactor

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleUpMinWorkerFraction

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput

type AutoscalingPolicySecondaryWorkerConfig

type AutoscalingPolicySecondaryWorkerConfig struct {
	MaxInstances *int `pulumi:"maxInstances"`
	MinInstances *int `pulumi:"minInstances"`
	Weight       *int `pulumi:"weight"`
}

type AutoscalingPolicySecondaryWorkerConfigArgs

type AutoscalingPolicySecondaryWorkerConfigArgs struct {
	MaxInstances pulumi.IntPtrInput `pulumi:"maxInstances"`
	MinInstances pulumi.IntPtrInput `pulumi:"minInstances"`
	Weight       pulumi.IntPtrInput `pulumi:"weight"`
}

func (AutoscalingPolicySecondaryWorkerConfigArgs) ElementType

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutput

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

type AutoscalingPolicySecondaryWorkerConfigInput

type AutoscalingPolicySecondaryWorkerConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput
	ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(context.Context) AutoscalingPolicySecondaryWorkerConfigOutput
}

type AutoscalingPolicySecondaryWorkerConfigOutput

type AutoscalingPolicySecondaryWorkerConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicySecondaryWorkerConfigOutput) ElementType

func (AutoscalingPolicySecondaryWorkerConfigOutput) MaxInstances

func (AutoscalingPolicySecondaryWorkerConfigOutput) MinInstances

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutput

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) Weight

type AutoscalingPolicySecondaryWorkerConfigPtrInput

type AutoscalingPolicySecondaryWorkerConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput
	ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput
}

type AutoscalingPolicySecondaryWorkerConfigPtrOutput

type AutoscalingPolicySecondaryWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) Elem

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ElementType

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) MaxInstances

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) MinInstances

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (o AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) Weight

type AutoscalingPolicyState

type AutoscalingPolicyState struct {
	// Basic algorithm for autoscaling.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrInput
	// The location where the autoscaling policy should reside. The default value is 'global'.
	Location pulumi.StringPtrInput
	// The "resource name" of the autoscaling policy.
	Name pulumi.StringPtrInput
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot
	// begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	PolicyId pulumi.StringPtrInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// Describes how the autoscaler will operate for secondary workers.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrInput
	// Describes how the autoscaler will operate for primary workers.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrInput
}

func (AutoscalingPolicyState) ElementType

func (AutoscalingPolicyState) ElementType() reflect.Type

type AutoscalingPolicyWorkerConfig

type AutoscalingPolicyWorkerConfig struct {
	MaxInstances int  `pulumi:"maxInstances"`
	MinInstances *int `pulumi:"minInstances"`
	Weight       *int `pulumi:"weight"`
}

type AutoscalingPolicyWorkerConfigArgs

type AutoscalingPolicyWorkerConfigArgs struct {
	MaxInstances pulumi.IntInput    `pulumi:"maxInstances"`
	MinInstances pulumi.IntPtrInput `pulumi:"minInstances"`
	Weight       pulumi.IntPtrInput `pulumi:"weight"`
}

func (AutoscalingPolicyWorkerConfigArgs) ElementType

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutput

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutputWithContext

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutput

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

type AutoscalingPolicyWorkerConfigInput

type AutoscalingPolicyWorkerConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput
	ToAutoscalingPolicyWorkerConfigOutputWithContext(context.Context) AutoscalingPolicyWorkerConfigOutput
}

type AutoscalingPolicyWorkerConfigOutput

type AutoscalingPolicyWorkerConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyWorkerConfigOutput) ElementType

func (AutoscalingPolicyWorkerConfigOutput) MaxInstances

func (AutoscalingPolicyWorkerConfigOutput) MinInstances

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutput

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutputWithContext

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutput

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigOutput) Weight

type AutoscalingPolicyWorkerConfigPtrInput

type AutoscalingPolicyWorkerConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput
	ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(context.Context) AutoscalingPolicyWorkerConfigPtrOutput
}

type AutoscalingPolicyWorkerConfigPtrOutput

type AutoscalingPolicyWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyWorkerConfigPtrOutput) Elem

func (AutoscalingPolicyWorkerConfigPtrOutput) ElementType

func (AutoscalingPolicyWorkerConfigPtrOutput) MaxInstances

func (AutoscalingPolicyWorkerConfigPtrOutput) MinInstances

func (AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutput

func (o AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigPtrOutput) Weight

type Cluster

type Cluster struct {
	pulumi.CustomResourceState

	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigOutput `pulumi:"clusterConfig"`
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringOutput `pulumi:"name"`
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrOutput `pulumi:"region"`
}

Manages a Cloud Dataproc cluster resource within GCP. For more information see [the official dataproc documentation](https://cloud.google.com/dataproc/).

!> **Warning:** Due to limitations of the API, all arguments except `labels`, `cluster_config.worker_config.num_instances` and `cluster_config.preemptible_worker_config.num_instances` are non-updatable. Changing others will cause recreation of the whole cluster!

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_cluster.html.markdown.

func GetCluster

func GetCluster(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error)

GetCluster gets an existing Cluster resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewCluster

func NewCluster(ctx *pulumi.Context,
	name string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error)

NewCluster registers a new resource with the given unique name, arguments, and options.
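
A sketch of a basic cluster, assuming the same import paths as above; the region, label, and staging bucket values are placeholders:

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Only labels and the worker/preemptible num_instances can change in place;
		// changing anything else recreates the cluster.
		_, err := dataproc.NewCluster(ctx, "simplecluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			Labels: pulumi.StringMap{
				"env": pulumi.String("dev"),
			},
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				// Placeholder bucket name; omit to let GCP assign a staging bucket.
				StagingBucket: pulumi.String("my-dataproc-staging-bucket"),
			},
		})
		return err
	})
}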

type ClusterArgs

type ClusterArgs struct {
	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigPtrInput
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapInput
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringPtrInput
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a Cluster resource.

func (ClusterArgs) ElementType

func (ClusterArgs) ElementType() reflect.Type

type ClusterClusterConfig

type ClusterClusterConfig struct {
	// The autoscaling policy config associated with the cluster.
	// Structure defined below.
	AutoscalingConfig *ClusterClusterConfigAutoscalingConfig `pulumi:"autoscalingConfig"`
	Bucket            *string                                `pulumi:"bucket"`
	// The Customer managed encryption keys settings for the cluster.
	// Structure defined below.
	EncryptionConfig *ClusterClusterConfigEncryptionConfig `pulumi:"encryptionConfig"`
	// Common config settings for resources of Google Compute Engine cluster
	// instances, applicable to all instances in the cluster. Structure defined below.
	GceClusterConfig *ClusterClusterConfigGceClusterConfig `pulumi:"gceClusterConfig"`
	// Commands to execute on each node after config is completed.
	// You can specify multiple versions of these. Structure defined below.
	InitializationActions []ClusterClusterConfigInitializationAction `pulumi:"initializationActions"`
	LifecycleConfig       *ClusterClusterConfigLifecycleConfig       `pulumi:"lifecycleConfig"`
	// The Google Compute Engine config settings for the master instances
	// in a cluster. Structure defined below.
	MasterConfig *ClusterClusterConfigMasterConfig `pulumi:"masterConfig"`
	// The Google Compute Engine config settings for the additional (aka
	// preemptible) instances in a cluster. Structure defined below.
	PreemptibleWorkerConfig *ClusterClusterConfigPreemptibleWorkerConfig `pulumi:"preemptibleWorkerConfig"`
	// Security related configuration. Structure defined below.
	SecurityConfig *ClusterClusterConfigSecurityConfig `pulumi:"securityConfig"`
	// The config settings for software inside the cluster.
	// Structure defined below.
	SoftwareConfig *ClusterClusterConfigSoftwareConfig `pulumi:"softwareConfig"`
	// The Cloud Storage staging bucket used to stage files,
	// such as Hadoop jars, between client machines and the cluster.
	// Note: If you don't explicitly specify a `stagingBucket`
	// then GCP will auto create / assign one for you. However, you are not guaranteed
	// an auto generated bucket which is solely dedicated to your cluster; it may be shared
	// with other clusters in the same region/zone also choosing to use the auto generation
	// option.
	StagingBucket *string `pulumi:"stagingBucket"`
	// The Google Compute Engine config settings for the worker instances
	// in a cluster. Structure defined below.
	WorkerConfig *ClusterClusterConfigWorkerConfig `pulumi:"workerConfig"`
}

type ClusterClusterConfigArgs

type ClusterClusterConfigArgs struct {
	// The autoscaling policy config associated with the cluster.
	// Structure defined below.
	AutoscalingConfig ClusterClusterConfigAutoscalingConfigPtrInput `pulumi:"autoscalingConfig"`
	Bucket            pulumi.StringPtrInput                         `pulumi:"bucket"`
	// The Customer managed encryption keys settings for the cluster.
	// Structure defined below.
	EncryptionConfig ClusterClusterConfigEncryptionConfigPtrInput `pulumi:"encryptionConfig"`
	// Common config settings for resources of Google Compute Engine cluster
	// instances, applicable to all instances in the cluster. Structure defined below.
	GceClusterConfig ClusterClusterConfigGceClusterConfigPtrInput `pulumi:"gceClusterConfig"`
	// Commands to execute on each node after config is completed.
	// You can specify multiple versions of these. Structure defined below.
	InitializationActions ClusterClusterConfigInitializationActionArrayInput `pulumi:"initializationActions"`
	LifecycleConfig       ClusterClusterConfigLifecycleConfigPtrInput        `pulumi:"lifecycleConfig"`
	// The Google Compute Engine config settings for the master instances
	// in a cluster. Structure defined below.
	MasterConfig ClusterClusterConfigMasterConfigPtrInput `pulumi:"masterConfig"`
	// The Google Compute Engine config settings for the additional (aka
	// preemptible) instances in a cluster. Structure defined below.
	PreemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfigPtrInput `pulumi:"preemptibleWorkerConfig"`
	// Security related configuration. Structure defined below.
	SecurityConfig ClusterClusterConfigSecurityConfigPtrInput `pulumi:"securityConfig"`
	// The config settings for software inside the cluster.
	// Structure defined below.
	SoftwareConfig ClusterClusterConfigSoftwareConfigPtrInput `pulumi:"softwareConfig"`
	// The Cloud Storage staging bucket used to stage files,
	// such as Hadoop jars, between client machines and the cluster.
	// Note: If you don't explicitly specify a `stagingBucket`
	// then GCP will auto create / assign one for you. However, you are not guaranteed
	// an auto generated bucket which is solely dedicated to your cluster; it may be shared
	// with other clusters in the same region/zone also choosing to use the auto generation
	// option.
	StagingBucket pulumi.StringPtrInput `pulumi:"stagingBucket"`
	// The Google Compute Engine config settings for the worker instances
	// in a cluster. Structure defined below.
	WorkerConfig ClusterClusterConfigWorkerConfigPtrInput `pulumi:"workerConfig"`
}

func (ClusterClusterConfigArgs) ElementType

func (ClusterClusterConfigArgs) ElementType() reflect.Type

func (ClusterClusterConfigArgs) ToClusterClusterConfigOutput

func (i ClusterClusterConfigArgs) ToClusterClusterConfigOutput() ClusterClusterConfigOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigOutputWithContext

func (i ClusterClusterConfigArgs) ToClusterClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutput

func (i ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutputWithContext

func (i ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

type ClusterClusterConfigAutoscalingConfig

type ClusterClusterConfigAutoscalingConfig struct {
	// The autoscaling policy used by the cluster.
	PolicyUri string `pulumi:"policyUri"`
}

type ClusterClusterConfigAutoscalingConfigArgs

type ClusterClusterConfigAutoscalingConfigArgs struct {
	// The autoscaling policy used by the cluster.
	PolicyUri pulumi.StringInput `pulumi:"policyUri"`
}
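
As a sketch, an autoscaling config slots into the ClusterClusterConfigArgs literal of the NewCluster example above; the URI below is an illustrative value, and it could instead be wired from an AutoscalingPolicy resource's Name output:

AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
	// Illustrative policy URI; also accepts the policy's full resource name output.
	PolicyUri: pulumi.String("projects/my-project/locations/us-central1/autoscalingPolicies/dataproc-policy"),
},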

func (ClusterClusterConfigAutoscalingConfigArgs) ElementType

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutput

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutputWithContext

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigInput

type ClusterClusterConfigAutoscalingConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput
	ToClusterClusterConfigAutoscalingConfigOutputWithContext(context.Context) ClusterClusterConfigAutoscalingConfigOutput
}

type ClusterClusterConfigAutoscalingConfigOutput

type ClusterClusterConfigAutoscalingConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigAutoscalingConfigOutput) ElementType

func (ClusterClusterConfigAutoscalingConfigOutput) PolicyUri

The autoscaling policy used by the cluster.

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutput

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigPtrInput

type ClusterClusterConfigAutoscalingConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput
	ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput
}

type ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigAutoscalingConfigPtrOutput) Elem

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ElementType

func (ClusterClusterConfigAutoscalingConfigPtrOutput) PolicyUri

The autoscaling policy used by the cluster.

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (o ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigEncryptionConfig

type ClusterClusterConfigEncryptionConfig struct {
	// The Cloud KMS key name to use for PD disk encryption for
	// all instances in the cluster.
	KmsKeyName string `pulumi:"kmsKeyName"`
}

type ClusterClusterConfigEncryptionConfigArgs

type ClusterClusterConfigEncryptionConfigArgs struct {
	// The Cloud KMS key name to use for PD disk encryption for
	// all instances in the cluster.
	KmsKeyName pulumi.StringInput `pulumi:"kmsKeyName"`
}
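
A fragment for the same ClusterClusterConfigArgs literal; the Cloud KMS key resource name is a placeholder:

EncryptionConfig: &dataproc.ClusterClusterConfigEncryptionConfigArgs{
	// Placeholder Cloud KMS key resource name.
	KmsKeyName: pulumi.String("projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"),
},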

func (ClusterClusterConfigEncryptionConfigArgs) ElementType

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutput

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutputWithContext

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutput

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigInput

type ClusterClusterConfigEncryptionConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput
	ToClusterClusterConfigEncryptionConfigOutputWithContext(context.Context) ClusterClusterConfigEncryptionConfigOutput
}

type ClusterClusterConfigEncryptionConfigOutput

type ClusterClusterConfigEncryptionConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEncryptionConfigOutput) ElementType

func (ClusterClusterConfigEncryptionConfigOutput) KmsKeyName

The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutput

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutputWithContext

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutput

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigPtrInput

type ClusterClusterConfigEncryptionConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput
	ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(context.Context) ClusterClusterConfigEncryptionConfigPtrOutput
}

type ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEncryptionConfigPtrOutput) Elem

func (ClusterClusterConfigEncryptionConfigPtrOutput) ElementType

func (ClusterClusterConfigEncryptionConfigPtrOutput) KmsKeyName

The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutput

func (o ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (o ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigGceClusterConfig

type ClusterClusterConfigGceClusterConfig struct {
	// By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance. If set to true, all
	// instances in the cluster will only have internal IP addresses. Note: Private Google Access
	// (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster
	// will be launched in.
	InternalIpOnly *bool `pulumi:"internalIpOnly"`
	// A map of the Compute Engine metadata entries to add to all instances
	// (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `pulumi:"metadata"`
	// The name or selfLink of the Google Compute Engine
	// network the cluster will be part of. Conflicts with `subnetwork`.
	// If neither is specified, this defaults to the "default" network.
	Network *string `pulumi:"network"`
	// The service account to be used by the Node VMs.
	// If not specified, the "default" service account is used.
	ServiceAccount *string `pulumi:"serviceAccount"`
	// The set of Google API scopes
	// to be made available on all of the node VMs under the `serviceAccount`
	// specified. These can be either FQDNs, or scope aliases. The following scopes
	// must be set if any other scopes are set. They're necessary to ensure the
	// correct functioning of the cluster, and are set automatically by the API.
	ServiceAccountScopes []string `pulumi:"serviceAccountScopes"`
	// The name or selfLink of the Google Compute Engine
	// subnetwork the cluster will be part of. Conflicts with `network`.
	Subnetwork *string `pulumi:"subnetwork"`
	// The list of instance tags applied to instances in the cluster.
	// Tags are used to identify valid sources or targets for network firewalls.
	Tags []string `pulumi:"tags"`
	// The GCP zone where your data is stored and used (i.e. where
	// the master and the worker nodes will be created). If `region` is set to 'global' (default)
	// then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone)
	// to determine this automatically for you.
	// Note: This setting additionally determines and restricts
	// which computing resources are available for use with other configs such as
	// `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.
	Zone *string `pulumi:"zone"`
}

type ClusterClusterConfigGceClusterConfigArgs

type ClusterClusterConfigGceClusterConfigArgs struct {
	// By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance. If set to true, all
	// instances in the cluster will only have internal IP addresses. Note: Private Google Access
	// (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster
	// will be launched in.
	InternalIpOnly pulumi.BoolPtrInput `pulumi:"internalIpOnly"`
	// A map of the Compute Engine metadata entries to add to all instances
	// (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata pulumi.StringMapInput `pulumi:"metadata"`
	// The name or selfLink of the Google Compute Engine
	// network the cluster will be part of. Conflicts with `subnetwork`.
	// If neither is specified, this defaults to the "default" network.
	Network pulumi.StringPtrInput `pulumi:"network"`
	// The service account to be used by the Node VMs.
	// If not specified, the "default" service account is used.
	ServiceAccount pulumi.StringPtrInput `pulumi:"serviceAccount"`
	// The set of Google API scopes
	// to be made available on all of the node VMs under the `serviceAccount`
	// specified. These can be either FQDNs, or scope aliases. The following scopes
	// must be set if any other scopes are set. They're necessary to ensure the
	// correct functioning of the cluster, and are set automatically by the API.
	ServiceAccountScopes pulumi.StringArrayInput `pulumi:"serviceAccountScopes"`
	// The name or selfLink of the Google Compute Engine
	// subnetwork the cluster will be part of. Conflicts with `network`.
	Subnetwork pulumi.StringPtrInput `pulumi:"subnetwork"`
	// The list of instance tags applied to instances in the cluster.
	// Tags are used to identify valid sources or targets for network firewalls.
	Tags pulumi.StringArrayInput `pulumi:"tags"`
	// The GCP zone where your data is stored and used (i.e. where
	// the master and the worker nodes will be created). If `region` is set to 'global' (default)
	// then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone)
	// to determine this automatically for you.
	// Note: This setting additionally determines and restricts
	// which computing resources are available for use with other configs such as
	// `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.
	Zone pulumi.StringPtrInput `pulumi:"zone"`
}
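
As a sketch of how these fields compose, the fragment below sits inside the ClusterClusterConfigArgs literal of the NewCluster example above; the zone, subnetwork, and tag values are placeholders:

GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
	Zone:           pulumi.String("us-central1-b"),
	InternalIpOnly: pulumi.Bool(true), // requires Private Google Access on the subnetwork
	Subnetwork:     pulumi.String("my-private-subnet"),
	Tags:           pulumi.StringArray{pulumi.String("dataproc"), pulumi.String("spark")},
},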

func (ClusterClusterConfigGceClusterConfigArgs) ElementType

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutput

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutputWithContext

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutput

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

type ClusterClusterConfigGceClusterConfigInput

type ClusterClusterConfigGceClusterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput
	ToClusterClusterConfigGceClusterConfigOutputWithContext(context.Context) ClusterClusterConfigGceClusterConfigOutput
}

type ClusterClusterConfigGceClusterConfigOutput

type ClusterClusterConfigGceClusterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigGceClusterConfigOutput) ElementType

func (ClusterClusterConfigGceClusterConfigOutput) InternalIpOnly

By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster will be launched in.

func (ClusterClusterConfigGceClusterConfigOutput) Metadata

A map of the Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (ClusterClusterConfigGceClusterConfigOutput) Network

The name or selfLink of the Google Compute Engine network the cluster will be part of. Conflicts with `subnetwork`. If neither is specified, this defaults to the "default" network.

func (ClusterClusterConfigGceClusterConfigOutput) ServiceAccount

The service account to be used by the Node VMs. If not specified, the "default" service account is used.

func (ClusterClusterConfigGceClusterConfigOutput) ServiceAccountScopes

The set of Google API scopes to be made available on all of the node VMs under the `serviceAccount` specified. These can be either FQDNs, or scope aliases. The following scopes must be set if any other scopes are set. They're necessary to ensure the correct functioning of the cluster, and are set automatically by the API.

func (ClusterClusterConfigGceClusterConfigOutput) Subnetwork

The name or selfLink of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with `network`.

func (ClusterClusterConfigGceClusterConfigOutput) Tags

The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutput

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutputWithContext

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutput

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigOutput) Zone

The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created). If `region` is set to 'global' (default) then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone) to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.

type ClusterClusterConfigGceClusterConfigPtrInput

type ClusterClusterConfigGceClusterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput
	ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigGceClusterConfigPtrOutput
}

type ClusterClusterConfigGceClusterConfigPtrOutput

type ClusterClusterConfigGceClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigGceClusterConfigPtrOutput) Elem

func (ClusterClusterConfigGceClusterConfigPtrOutput) ElementType

func (ClusterClusterConfigGceClusterConfigPtrOutput) InternalIpOnly

By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster will be launched in.

func (ClusterClusterConfigGceClusterConfigPtrOutput) Metadata

A map of the Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (ClusterClusterConfigGceClusterConfigPtrOutput) Network

The name or selfLink of the Google Compute Engine network the cluster will be part of. Conflicts with `subnetwork`. If neither is specified, this defaults to the "default" network.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ServiceAccount

The service account to be used by the Node VMs. If not specified, the "default" service account is used.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ServiceAccountScopes

The set of Google API scopes to be made available on all of the node VMs under the `serviceAccount` specified. These can be either FQDNs, or scope aliases. The following scopes must be set if any other scopes are set. They're necessary to ensure the correct functioning of the cluster, and are set automatically by the API.

func (ClusterClusterConfigGceClusterConfigPtrOutput) Subnetwork

The name or selfLink of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with `network`.

func (ClusterClusterConfigGceClusterConfigPtrOutput) Tags

The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutput

func (o ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigPtrOutput) Zone

The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created). If `region` is set to 'global' (default) then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone) to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.

type ClusterClusterConfigInitializationAction

type ClusterClusterConfigInitializationAction struct {
	Script string `pulumi:"script"`
	// The maximum duration (in seconds) which `script` is
	// allowed to take to execute its action. GCP will default to a predetermined
	// computed value if not set (currently 300).
	TimeoutSec *int `pulumi:"timeoutSec"`
}

type ClusterClusterConfigInitializationActionArgs

type ClusterClusterConfigInitializationActionArgs struct {
	Script pulumi.StringInput `pulumi:"script"`
	// The maximum duration (in seconds) which `script` is
	// allowed to take to execute its action. GCP will default to a predetermined
	// computed value if not set (currently 300).
	TimeoutSec pulumi.IntPtrInput `pulumi:"timeoutSec"`
}
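
An initialization action slots into the same ClusterClusterConfigArgs literal; the script URI below is a placeholder and TimeoutSec overrides the 300-second default:

InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
	&dataproc.ClusterClusterConfigInitializationActionArgs{
		Script:     pulumi.String("gs://my-bucket/setup.sh"), // placeholder GCS path
		TimeoutSec: pulumi.Int(500),
	},
},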

func (ClusterClusterConfigInitializationActionArgs) ElementType

func (ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutput

func (i ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput

func (ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutputWithContext

func (i ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInitializationActionArray

type ClusterClusterConfigInitializationActionArray []ClusterClusterConfigInitializationActionInput

func (ClusterClusterConfigInitializationActionArray) ElementType

func (ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutput

func (i ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput

func (ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutputWithContext

func (i ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionArrayInput

type ClusterClusterConfigInitializationActionArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput
	ToClusterClusterConfigInitializationActionArrayOutputWithContext(context.Context) ClusterClusterConfigInitializationActionArrayOutput
}

type ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigInitializationActionArrayOutput) ElementType

func (ClusterClusterConfigInitializationActionArrayOutput) Index

func (ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutput

func (o ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput

func (ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutputWithContext

func (o ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionInput

type ClusterClusterConfigInitializationActionInput interface {
	pulumi.Input

	ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput
	ToClusterClusterConfigInitializationActionOutputWithContext(context.Context) ClusterClusterConfigInitializationActionOutput
}

type ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInitializationActionOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigInitializationActionOutput) ElementType

func (ClusterClusterConfigInitializationActionOutput) Script

func (ClusterClusterConfigInitializationActionOutput) TimeoutSec

The maximum duration (in seconds) which `script` is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).

func (ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutput

func (o ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput

func (ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutputWithContext

func (o ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInput

type ClusterClusterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigOutput() ClusterClusterConfigOutput
	ToClusterClusterConfigOutputWithContext(context.Context) ClusterClusterConfigOutput
}

type ClusterClusterConfigLifecycleConfig

type ClusterClusterConfigLifecycleConfig struct {
	// The time when the cluster will be auto-deleted.
	// A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
	// Example: "2014-10-02T15:01:23.045123456Z".
	AutoDeleteTime *string `pulumi:"autoDeleteTime"`
	// The duration to keep the cluster alive while idling
	// (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
	IdleDeleteTtl *string `pulumi:"idleDeleteTtl"`
	IdleStartTime *string `pulumi:"idleStartTime"`
}

type ClusterClusterConfigLifecycleConfigArgs

type ClusterClusterConfigLifecycleConfigArgs struct {
	// The time when the cluster will be auto-deleted.
	// A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
	// Example: "2014-10-02T15:01:23.045123456Z".
	AutoDeleteTime pulumi.StringPtrInput `pulumi:"autoDeleteTime"`
	// The duration to keep the cluster alive while idling
	// (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
	IdleDeleteTtl pulumi.StringPtrInput `pulumi:"idleDeleteTtl"`
	IdleStartTime pulumi.StringPtrInput `pulumi:"idleStartTime"`
}
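
A short illustrative value for the lifecycle block, assuming the standard imports; the values follow the formats given in the field descriptions above:

	lifecycleConfig := dataproc.ClusterClusterConfigLifecycleConfigArgs{
		// Delete the cluster after it has been idle (no jobs running) for 10 minutes,
		// the documented minimum for this field.
		IdleDeleteTtl: pulumi.String("10m"),
		// Unconditionally delete the cluster at this RFC3339 "Zulu" timestamp.
		AutoDeleteTime: pulumi.String("2014-10-02T15:01:23.045123456Z"),
	}

IdleStartTime is left unset here; it carries no description above and is typically reported back by the service rather than configured.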

func (ClusterClusterConfigLifecycleConfigArgs) ElementType

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutput

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutputWithContext

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutput

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigInput

type ClusterClusterConfigLifecycleConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput
	ToClusterClusterConfigLifecycleConfigOutputWithContext(context.Context) ClusterClusterConfigLifecycleConfigOutput
}

type ClusterClusterConfigLifecycleConfigOutput

type ClusterClusterConfigLifecycleConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigLifecycleConfigOutput) AutoDeleteTime

The time when the cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".

func (ClusterClusterConfigLifecycleConfigOutput) ElementType

func (ClusterClusterConfigLifecycleConfigOutput) IdleDeleteTtl

The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].

func (ClusterClusterConfigLifecycleConfigOutput) IdleStartTime

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutput

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutputWithContext

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutput

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigPtrInput

type ClusterClusterConfigLifecycleConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput
	ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(context.Context) ClusterClusterConfigLifecycleConfigPtrOutput
}

type ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigLifecycleConfigPtrOutput) AutoDeleteTime

The time when the cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".

func (ClusterClusterConfigLifecycleConfigPtrOutput) Elem

func (ClusterClusterConfigLifecycleConfigPtrOutput) ElementType

func (ClusterClusterConfigLifecycleConfigPtrOutput) IdleDeleteTtl

The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].

func (ClusterClusterConfigLifecycleConfigPtrOutput) IdleStartTime

func (ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutput

func (o ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (o ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigMasterConfig

type ClusterClusterConfigMasterConfig struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators []ClusterClusterConfigMasterConfigAccelerator `pulumi:"accelerators"`
	// Disk Config
	DiskConfig *ClusterClusterConfigMasterConfigDiskConfig `pulumi:"diskConfig"`
	// The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      *string  `pulumi:"imageUri"`
	InstanceNames []string `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the master node. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType *string `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the master. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	NumInstances   *int    `pulumi:"numInstances"`
}

type ClusterClusterConfigMasterConfigAccelerator

type ClusterClusterConfigMasterConfigAccelerator struct {
	// The number of accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount int `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType string `pulumi:"acceleratorType"`
}

type ClusterClusterConfigMasterConfigAcceleratorArgs

type ClusterClusterConfigMasterConfigAcceleratorArgs struct {
	// The number of accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount pulumi.IntInput `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringInput `pulumi:"acceleratorType"`
}
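
A minimal example of a single accelerator entry, using the example values from the field descriptions above (standard imports assumed):

	accelerator := dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
		// Expose one NVIDIA Tesla K80 card to each master instance.
		AcceleratorCount: pulumi.Int(1),
		AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
	}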

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutput

func (i ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext

func (i ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigAcceleratorArray

type ClusterClusterConfigMasterConfigAcceleratorArray []ClusterClusterConfigMasterConfigAcceleratorInput

func (ClusterClusterConfigMasterConfigAcceleratorArray) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (i ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput() ClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext

func (i ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorArrayInput

type ClusterClusterConfigMasterConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigAcceleratorArrayOutput() ClusterClusterConfigMasterConfigAcceleratorArrayOutput
	ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput
}

type ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) Index

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext

func (o ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorInput

type ClusterClusterConfigMasterConfigAcceleratorInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput
	ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput
}

type ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigAcceleratorOutput) AcceleratorCount

The number of accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.

func (ClusterClusterConfigMasterConfigAcceleratorOutput) AcceleratorType

The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutput

func (o ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext

func (o ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigArgs

type ClusterClusterConfigMasterConfigArgs struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators ClusterClusterConfigMasterConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Disk Config
	DiskConfig ClusterClusterConfigMasterConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      pulumi.StringPtrInput   `pulumi:"imageUri"`
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the master node. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the master. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	NumInstances   pulumi.IntPtrInput    `pulumi:"numInstances"`
}
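
A sketch of a master config, assuming the standard imports; the accelerator entry repeats the example above, and DiskConfig is omitted here because the disk-config types are documented separately below:

	masterConfig := dataproc.ClusterClusterConfigMasterConfigArgs{
		// A single master node, with the documented default machine type made explicit.
		NumInstances: pulumi.Int(1),
		MachineType:  pulumi.String("n1-standard-4"),
		Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
			dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
				AcceleratorCount: pulumi.Int(1),
				AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
			},
		},
	}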

func (ClusterClusterConfigMasterConfigArgs) ElementType

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutput

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutputWithContext

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutput

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfig

type ClusterClusterConfigMasterConfigDiskConfig struct {
	// Size of the primary disk attached to each master node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each master node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each master node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigMasterConfigDiskConfigArgs

type ClusterClusterConfigMasterConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each master node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each master node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each master node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}
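
A short example of the master disk-config block (standard imports assumed; sizes are illustrative):

	masterDiskConfig := dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
		// A 100 GB pd-ssd boot disk instead of the 500 GB computed default.
		BootDiskSizeGb: pulumi.Int(100),
		BootDiskType:   pulumi.String("pd-ssd"),
		// No local SSDs, which is the documented default.
		NumLocalSsds: pulumi.Int(0),
	}

The Args value satisfies ClusterClusterConfigMasterConfigDiskConfigPtrInput (see the To...PtrOutput methods below), so it can be assigned directly to the DiskConfig field of ClusterClusterConfigMasterConfigArgs.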

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutput

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigInput

type ClusterClusterConfigMasterConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput
	ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput
}

type ClusterClusterConfigMasterConfigDiskConfigOutput

type ClusterClusterConfigMasterConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each master node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each master node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each master node. Defaults to 0.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutput

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigPtrInput

type ClusterClusterConfigMasterConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput
	ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput
}

type ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each master node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each master node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each master node. Defaults to 0.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (o ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigInput

type ClusterClusterConfigMasterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput
	ToClusterClusterConfigMasterConfigOutputWithContext(context.Context) ClusterClusterConfigMasterConfigOutput
}

type ClusterClusterConfigMasterConfigOutput

type ClusterClusterConfigMasterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigMasterConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigMasterConfigOutput) ElementType

func (ClusterClusterConfigMasterConfigOutput) ImageUri

The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigMasterConfigOutput) InstanceNames

func (ClusterClusterConfigMasterConfigOutput) MachineType

The name of a Google Compute Engine machine type to create for the master node. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigMasterConfigOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigMasterConfigOutput) NumInstances

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutput

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutputWithContext

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutput

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigPtrInput

type ClusterClusterConfigMasterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput
	ToClusterClusterConfigMasterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigMasterConfigPtrOutput
}

type ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigPtrOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigMasterConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigMasterConfigPtrOutput) Elem

func (ClusterClusterConfigMasterConfigPtrOutput) ElementType

func (ClusterClusterConfigMasterConfigPtrOutput) ImageUri

The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigMasterConfigPtrOutput) InstanceNames

func (ClusterClusterConfigMasterConfigPtrOutput) MachineType

The name of a Google Compute Engine machine type to create for the master node. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigMasterConfigPtrOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigMasterConfigPtrOutput) NumInstances

func (ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutput

func (o ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigOutput

type ClusterClusterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigOutput) AutoscalingConfig

The autoscaling policy config associated with the cluster. Structure defined below.

func (ClusterClusterConfigOutput) Bucket

func (ClusterClusterConfigOutput) ElementType

func (ClusterClusterConfigOutput) ElementType() reflect.Type

func (ClusterClusterConfigOutput) EncryptionConfig

The Customer managed encryption keys settings for the cluster. Structure defined below.

func (ClusterClusterConfigOutput) GceClusterConfig

Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.

func (ClusterClusterConfigOutput) InitializationActions

Commands to execute on each node after the config is completed. This block can be specified multiple times. Structure defined below.

func (ClusterClusterConfigOutput) LifecycleConfig

func (ClusterClusterConfigOutput) MasterConfig

The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.

func (ClusterClusterConfigOutput) PreemptibleWorkerConfig

The Google Compute Engine config settings for the additional (aka preemptible) instances in a cluster. Structure defined below.

func (ClusterClusterConfigOutput) SecurityConfig

Security related configuration. Structure defined below.

func (ClusterClusterConfigOutput) SoftwareConfig

The config settings for software inside the cluster. Structure defined below.

func (ClusterClusterConfigOutput) StagingBucket

The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a `stagingBucket`, GCP will automatically create or assign one for you. However, the auto-generated bucket is not guaranteed to be dedicated solely to your cluster; it may be shared with other clusters in the same region/zone that also use the auto-generation option.

func (ClusterClusterConfigOutput) ToClusterClusterConfigOutput

func (o ClusterClusterConfigOutput) ToClusterClusterConfigOutput() ClusterClusterConfigOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigOutputWithContext

func (o ClusterClusterConfigOutput) ToClusterClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutput

func (o ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

func (ClusterClusterConfigOutput) WorkerConfig

The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
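
Reading these fields back from a deployed cluster is a matter of chaining the accessor methods above. A hedged sketch, assuming the package's Cluster resource exposes this block as a ClusterClusterConfigOutput-typed ClusterConfig property and that ctx is the program's *pulumi.Context:

	// Export the staging bucket that was resolved (or auto-assigned) for the cluster.
	ctx.Export("stagingBucket", cluster.ClusterConfig.StagingBucket())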

type ClusterClusterConfigPreemptibleWorkerConfig

type ClusterClusterConfigPreemptibleWorkerConfig struct {
	// Disk Config
	DiskConfig    *ClusterClusterConfigPreemptibleWorkerConfigDiskConfig `pulumi:"diskConfig"`
	InstanceNames []string                                               `pulumi:"instanceNames"`
	NumInstances  *int                                                   `pulumi:"numInstances"`
}

type ClusterClusterConfigPreemptibleWorkerConfigArgs

type ClusterClusterConfigPreemptibleWorkerConfigArgs struct {
	// Disk Config
	DiskConfig    ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	InstanceNames pulumi.StringArrayInput                                       `pulumi:"instanceNames"`
	NumInstances  pulumi.IntPtrInput                                            `pulumi:"numInstances"`
}

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutput

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfig

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfig struct {
	// Size of the primary disk attached to each preemptible worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each preemptible worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each preemptible worker node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each preemptible worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each preemptible worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each preemptible worker node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}
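
A combined sketch of a preemptible worker block together with its disk config (standard imports assumed; counts and sizes are illustrative):

	preemptibleWorkerConfig := dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
		// Two preemptible workers in addition to the primary workers.
		NumInstances: pulumi.Int(2),
		// The DiskConfig field accepts the Args value directly because the Args type
		// implements the corresponding PtrInput interface (see its To...PtrOutput methods).
		DiskConfig: dataproc.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{
			BootDiskSizeGb: pulumi.Int(50),
			BootDiskType:   pulumi.String("pd-standard"),
		},
	}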

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput
	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput
}

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each preemptible worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput
	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput
}

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each preemptible worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigInput

type ClusterClusterConfigPreemptibleWorkerConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput
	ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput
}

type ClusterClusterConfigPreemptibleWorkerConfigOutput

type ClusterClusterConfigPreemptibleWorkerConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) InstanceNames

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) NumInstances

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutput

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigPtrInput

type ClusterClusterConfigPreemptibleWorkerConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput
	ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput
}

type ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) Elem

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) InstanceNames

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) NumInstances

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPtrInput

type ClusterClusterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput
	ToClusterClusterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPtrOutput
}

type ClusterClusterConfigPtrOutput

type ClusterClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPtrOutput) AutoscalingConfig

The autoscaling policy config associated with the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) Bucket

func (ClusterClusterConfigPtrOutput) Elem

func (ClusterClusterConfigPtrOutput) ElementType

func (ClusterClusterConfigPtrOutput) EncryptionConfig

The Customer managed encryption keys settings for the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) GceClusterConfig

Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) InitializationActions

Commands to execute on each node after the config is completed. This block can be specified multiple times. Structure defined below.

func (ClusterClusterConfigPtrOutput) LifecycleConfig

func (ClusterClusterConfigPtrOutput) MasterConfig

The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) PreemptibleWorkerConfig

The Google Compute Engine config settings for the additional (aka preemptible) instances in a cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) SecurityConfig

Security related configuration. Structure defined below.

func (ClusterClusterConfigPtrOutput) SoftwareConfig

The config settings for software inside the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) StagingBucket

The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a `stagingBucket`, GCP will automatically create or assign one for you. However, the auto-generated bucket is not guaranteed to be dedicated solely to your cluster; it may be shared with other clusters in the same region/zone that also use the auto-generation option.

func (ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutput

func (o ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

func (ClusterClusterConfigPtrOutput) WorkerConfig

The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.

type ClusterClusterConfigSecurityConfig

type ClusterClusterConfigSecurityConfig struct {
	// Kerberos Configuration
	KerberosConfig ClusterClusterConfigSecurityConfigKerberosConfig `pulumi:"kerberosConfig"`
}

type ClusterClusterConfigSecurityConfigArgs

type ClusterClusterConfigSecurityConfigArgs struct {
	// Kerberos Configuration
	KerberosConfig ClusterClusterConfigSecurityConfigKerberosConfigInput `pulumi:"kerberosConfig"`
}

func (ClusterClusterConfigSecurityConfigArgs) ElementType

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutput

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutputWithContext

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutput

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigInput

type ClusterClusterConfigSecurityConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput
	ToClusterClusterConfigSecurityConfigOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigOutput
}

type ClusterClusterConfigSecurityConfigKerberosConfig

type ClusterClusterConfigSecurityConfigKerberosConfig struct {
	// The admin server (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer *string `pulumi:"crossRealmTrustAdminServer"`
	// The KDC (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc *string `pulumi:"crossRealmTrustKdc"`
	// The remote realm the Dataproc on-cluster KDC will
	// trust, should the user enable cross realm trust.
	CrossRealmTrustRealm *string `pulumi:"crossRealmTrustRealm"`
	// The Cloud Storage URI of a KMS
	// encrypted file containing the shared password between the on-cluster Kerberos realm
	// and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri *string `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Flag to indicate whether to Kerberize the cluster.
	EnableKerberos *bool `pulumi:"enableKerberos"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the master key of the KDC database.
	KdcDbKeyUri *string `pulumi:"kdcDbKeyUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided key. For the self-signed certificate, this password
	// is generated by Dataproc.
	KeyPasswordUri *string `pulumi:"keyPasswordUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided keystore. For the self-signed certificate, the password
	// is generated by Dataproc.
	KeystorePasswordUri *string `pulumi:"keystorePasswordUri"`
	// The Cloud Storage URI of the keystore file used for SSL encryption.
	// If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri *string `pulumi:"keystoreUri"`
	// The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri string `pulumi:"kmsKeyUri"`
	// The name of the on-cluster Kerberos realm. If not specified, the
	// uppercased domain of hostnames will be the realm.
	Realm *string `pulumi:"realm"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the root principal password.
	RootPrincipalPasswordUri string `pulumi:"rootPrincipalPasswordUri"`
	// The lifetime of the ticket granting ticket, in hours.
	TgtLifetimeHours *int `pulumi:"tgtLifetimeHours"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the password to the user provided truststore. For the self-signed
	// certificate, this password is generated by Dataproc.
	TruststorePasswordUri *string `pulumi:"truststorePasswordUri"`
	// The Cloud Storage URI of the truststore file used for
	// SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri *string `pulumi:"truststoreUri"`
}

type ClusterClusterConfigSecurityConfigKerberosConfigArgs

type ClusterClusterConfigSecurityConfigKerberosConfigArgs struct {
	// The admin server (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer pulumi.StringPtrInput `pulumi:"crossRealmTrustAdminServer"`
	// The KDC (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc pulumi.StringPtrInput `pulumi:"crossRealmTrustKdc"`
	// The remote realm the Dataproc on-cluster KDC will
	// trust, should the user enable cross realm trust.
	CrossRealmTrustRealm pulumi.StringPtrInput `pulumi:"crossRealmTrustRealm"`
	// The Cloud Storage URI of a KMS
	// encrypted file containing the shared password between the on-cluster Kerberos realm
	// and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri pulumi.StringPtrInput `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Flag to indicate whether to Kerberize the cluster.
	EnableKerberos pulumi.BoolPtrInput `pulumi:"enableKerberos"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the master key of the KDC database.
	KdcDbKeyUri pulumi.StringPtrInput `pulumi:"kdcDbKeyUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided key. For the self-signed certificate, this password
	// is generated by Dataproc.
	KeyPasswordUri pulumi.StringPtrInput `pulumi:"keyPasswordUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided keystore. For the self-signed certificate, the password
	// is generated by Dataproc.
	KeystorePasswordUri pulumi.StringPtrInput `pulumi:"keystorePasswordUri"`
	// The Cloud Storage URI of the keystore file used for SSL encryption.
	// If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri pulumi.StringPtrInput `pulumi:"keystoreUri"`
	// The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri pulumi.StringInput `pulumi:"kmsKeyUri"`
	// The name of the on-cluster Kerberos realm. If not specified, the
	// uppercased domain of hostnames will be the realm.
	Realm pulumi.StringPtrInput `pulumi:"realm"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the root principal password.
	RootPrincipalPasswordUri pulumi.StringInput `pulumi:"rootPrincipalPasswordUri"`
	// The lifetime of the ticket granting ticket, in hours.
	TgtLifetimeHours pulumi.IntPtrInput `pulumi:"tgtLifetimeHours"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the password to the user provided truststore. For the self-signed
	// certificate, this password is generated by Dataproc.
	TruststorePasswordUri pulumi.StringPtrInput `pulumi:"truststorePasswordUri"`
	// The Cloud Storage URI of the truststore file used for
	// SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri pulumi.StringPtrInput `pulumi:"truststoreUri"`
}
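
A hedged sketch of a Kerberized security block; the project, key ring, and Cloud Storage URIs are hypothetical placeholders, and the standard imports are assumed:

	securityConfig := dataproc.ClusterClusterConfigSecurityConfigArgs{
		KerberosConfig: dataproc.ClusterClusterConfigSecurityConfigKerberosConfigArgs{
			// Kerberize the cluster.
			EnableKerberos: pulumi.Bool(true),
			// KMS key used to decrypt the KMS-encrypted files referenced below.
			KmsKeyUri: pulumi.String("projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"),
			// KMS-encrypted root principal password staged in Cloud Storage.
			RootPrincipalPasswordUri: pulumi.String("gs://my-bucket/kerberos/root-password.encrypted"),
		},
	}

Note that KmsKeyUri and RootPrincipalPasswordUri are the only non-pointer (required) fields in the struct above; all of the other Kerberos settings are optional.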

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ElementType

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutput

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutput() ClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput

type ClusterClusterConfigSecurityConfigKerberosConfigInput

type ClusterClusterConfigSecurityConfigKerberosConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigKerberosConfigOutput() ClusterClusterConfigSecurityConfigKerberosConfigOutput
	ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput
}

type ClusterClusterConfigSecurityConfigKerberosConfigOutput

type ClusterClusterConfigSecurityConfigKerberosConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustAdminServer

The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustKdc

The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustRealm

The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustSharedPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ElementType

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) EnableKerberos

Flag to indicate whether to Kerberize the cluster.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KdcDbKeyUri

The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeyPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeystorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeystoreUri

The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KmsKeyUri

The URI of the KMS key used to encrypt various sensitive files.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) Realm

The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) RootPrincipalPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TgtLifetimeHours

The lifetime of the ticket granting ticket, in hours.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext

func (o ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TruststorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TruststoreUri

The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

type ClusterClusterConfigSecurityConfigOutput

type ClusterClusterConfigSecurityConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigOutput) ElementType

func (ClusterClusterConfigSecurityConfigOutput) KerberosConfig

Kerberos Configuration

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutput

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutputWithContext

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutput

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigPtrInput

type ClusterClusterConfigSecurityConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput
	ToClusterClusterConfigSecurityConfigPtrOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigPtrOutput
}

type ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigPtrOutput) Elem

func (ClusterClusterConfigSecurityConfigPtrOutput) ElementType

func (ClusterClusterConfigSecurityConfigPtrOutput) KerberosConfig

Kerberos Configuration

func (ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutput

func (o ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSoftwareConfig

type ClusterClusterConfigSoftwareConfig struct {
	// The Cloud Dataproc image version to use
	// for the cluster - this controls the sets of software versions
	// installed onto the nodes when you create clusters. If not specified, defaults to the
	// latest version. For a list of valid versions see
	// [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)
	ImageVersion *string `pulumi:"imageVersion"`
	// The set of optional components to activate on the cluster.
	// Accepted values are:
	// * ANACONDA
	// * DRUID
	// * HIVE_WEBHCAT
	// * JUPYTER
	// * KERBEROS
	// * PRESTO
	// * ZEPPELIN
	// * ZOOKEEPER
	OptionalComponents []string `pulumi:"optionalComponents"`
	// A list of override and additional properties (key/value pairs)
	// used to modify various aspects of the common configuration files used when creating
	// a cluster. For a list of valid properties please see
	// [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)
	OverrideProperties map[string]string      `pulumi:"overrideProperties"`
	Properties         map[string]interface{} `pulumi:"properties"`
}

type ClusterClusterConfigSoftwareConfigArgs

type ClusterClusterConfigSoftwareConfigArgs struct {
	// The Cloud Dataproc image version to use
	// for the cluster - this controls the sets of software versions
	// installed onto the nodes when you create clusters. If not specified, defaults to the
	// latest version. For a list of valid versions see
	// [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)
	ImageVersion pulumi.StringPtrInput `pulumi:"imageVersion"`
	// The set of optional components to activate on the cluster.
	// Accepted values are:
	// * ANACONDA
	// * DRUID
	// * HIVE_WEBHCAT
	// * JUPYTER
	// * KERBEROS
	// * PRESTO
	// * ZEPPELIN
	// * ZOOKEEPER
	OptionalComponents pulumi.StringArrayInput `pulumi:"optionalComponents"`
	// A list of override and additional properties (key/value pairs)
	// used to modify various aspects of the common configuration files used when creating
	// a cluster. For a list of valid properties please see
	// [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)
	OverrideProperties pulumi.StringMapInput `pulumi:"overrideProperties"`
	Properties         pulumi.MapInput       `pulumi:"properties"`
}
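
A short, hedged sketch of a software config: pin an image version, enable two optional components, and override one documented cluster property. The values are illustrative only, and the import paths are assumed for this module version.

package example

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

// softwareConfig returns a value suitable for a cluster's software config;
// the Args type satisfies the PtrInput interface.
func softwareConfig() dataproc.ClusterClusterConfigSoftwareConfigPtrInput {
	return dataproc.ClusterClusterConfigSoftwareConfigArgs{
		ImageVersion: pulumi.String("1.4-debian9"),
		OptionalComponents: pulumi.StringArray{
			pulumi.String("JUPYTER"),
			pulumi.String("ZEPPELIN"),
		},
		OverrideProperties: pulumi.StringMap{
			// Documented Dataproc cluster property, shown here only as an example override.
			"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
		},
	}
}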

func (ClusterClusterConfigSoftwareConfigArgs) ElementType

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutput

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutputWithContext

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutput

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigInput

type ClusterClusterConfigSoftwareConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput
	ToClusterClusterConfigSoftwareConfigOutputWithContext(context.Context) ClusterClusterConfigSoftwareConfigOutput
}

type ClusterClusterConfigSoftwareConfigOutput

type ClusterClusterConfigSoftwareConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSoftwareConfigOutput) ElementType

func (ClusterClusterConfigSoftwareConfigOutput) ImageVersion

The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)

func (ClusterClusterConfigSoftwareConfigOutput) OptionalComponents

The set of optional components to activate on the cluster. Accepted values are:

* ANACONDA
* DRUID
* HIVE_WEBHCAT
* JUPYTER
* KERBEROS
* PRESTO
* ZEPPELIN
* ZOOKEEPER

func (ClusterClusterConfigSoftwareConfigOutput) OverrideProperties

A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)

func (ClusterClusterConfigSoftwareConfigOutput) Properties

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutput

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutputWithContext

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutput

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigPtrInput

type ClusterClusterConfigSoftwareConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput
	ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(context.Context) ClusterClusterConfigSoftwareConfigPtrOutput
}

type ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSoftwareConfigPtrOutput) Elem

func (ClusterClusterConfigSoftwareConfigPtrOutput) ElementType

func (ClusterClusterConfigSoftwareConfigPtrOutput) ImageVersion

The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)

func (ClusterClusterConfigSoftwareConfigPtrOutput) OptionalComponents

The set of optional components to activate on the cluster. Accepted values are:

* ANACONDA
* DRUID
* HIVE_WEBHCAT
* JUPYTER
* KERBEROS
* PRESTO
* ZEPPELIN
* ZOOKEEPER

func (ClusterClusterConfigSoftwareConfigPtrOutput) OverrideProperties

A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)

func (ClusterClusterConfigSoftwareConfigPtrOutput) Properties

func (ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutput

func (o ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (o ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigWorkerConfig

type ClusterClusterConfigWorkerConfig struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators []ClusterClusterConfigWorkerConfigAccelerator `pulumi:"accelerators"`
	// Disk Config
	DiskConfig *ClusterClusterConfigWorkerConfigDiskConfig `pulumi:"diskConfig"`
	// The URI for the image to use for this worker.  See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      *string  `pulumi:"imageUri"`
	InstanceNames []string `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the worker nodes. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType *string `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the worker nodes. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	NumInstances   *int    `pulumi:"numInstances"`
}

type ClusterClusterConfigWorkerConfigAccelerator

type ClusterClusterConfigWorkerConfigAccelerator struct {
	// The number of accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount int `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType string `pulumi:"acceleratorType"`
}

type ClusterClusterConfigWorkerConfigAcceleratorArgs

type ClusterClusterConfigWorkerConfigAcceleratorArgs struct {
	// The number of accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount pulumi.IntInput `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringInput `pulumi:"acceleratorType"`
}

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutput

func (i ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext

func (i ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigAcceleratorArray

type ClusterClusterConfigWorkerConfigAcceleratorArray []ClusterClusterConfigWorkerConfigAcceleratorInput

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (i ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput() ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext

func (i ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorArrayInput

type ClusterClusterConfigWorkerConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput() ClusterClusterConfigWorkerConfigAcceleratorArrayOutput
	ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput
}

type ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) Index

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext

func (o ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorInput

type ClusterClusterConfigWorkerConfigAcceleratorInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput
	ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput
}

type ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) AcceleratorCount

The number of accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) AcceleratorType

The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutput

func (o ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext

func (o ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigArgs

type ClusterClusterConfigWorkerConfigArgs struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators ClusterClusterConfigWorkerConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Disk Config
	DiskConfig ClusterClusterConfigWorkerConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// The URI for the image to use for this worker.  See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      pulumi.StringPtrInput   `pulumi:"imageUri"`
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the worker nodes. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the worker nodes. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	NumInstances   pulumi.IntPtrInput    `pulumi:"numInstances"`
}
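
A hedged sketch of a worker pool: two n1-standard-4 workers with 100 GB boot disks, one local SSD, and one GPU each. Machine and accelerator types are placeholders, and import paths are assumed for this module version.

package example

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

// workerConfig builds a value for the worker-config portion of a cluster config.
func workerConfig() dataproc.ClusterClusterConfigWorkerConfigPtrInput {
	return dataproc.ClusterClusterConfigWorkerConfigArgs{
		NumInstances: pulumi.Int(2),
		MachineType:  pulumi.String("n1-standard-4"),
		DiskConfig: dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
			BootDiskSizeGb: pulumi.Int(100),
			BootDiskType:   pulumi.String("pd-standard"),
			NumLocalSsds:   pulumi.Int(1),
		},
		Accelerators: dataproc.ClusterClusterConfigWorkerConfigAcceleratorArray{
			dataproc.ClusterClusterConfigWorkerConfigAcceleratorArgs{
				AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
				AcceleratorCount: pulumi.Int(1),
			},
		},
	}
}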

func (ClusterClusterConfigWorkerConfigArgs) ElementType

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutput

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutputWithContext

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutput

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfig

type ClusterClusterConfigWorkerConfigDiskConfig struct {
	// Size of the primary disk attached to each preemptible worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each preemptible worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The number of local SSD disks that will be
	// attached to each preemptible worker node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigWorkerConfigDiskConfigArgs

type ClusterClusterConfigWorkerConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each preemptible worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each preemptible worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The number of local SSD disks that will be
	// attached to each preemptible worker node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutput

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigInput

type ClusterClusterConfigWorkerConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput
	ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput
}

type ClusterClusterConfigWorkerConfigDiskConfigOutput

type ClusterClusterConfigWorkerConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each preemptible worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) NumLocalSsds

The number of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigPtrInput

type ClusterClusterConfigWorkerConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput
	ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput
}

type ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each preemptible worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) NumLocalSsds

The number of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigInput

type ClusterClusterConfigWorkerConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput
	ToClusterClusterConfigWorkerConfigOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigOutput
}

type ClusterClusterConfigWorkerConfigOutput

type ClusterClusterConfigWorkerConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigWorkerConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigWorkerConfigOutput) ElementType

func (ClusterClusterConfigWorkerConfigOutput) ImageUri

The URI for the image to use for this worker. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigWorkerConfigOutput) InstanceNames

func (ClusterClusterConfigWorkerConfigOutput) MachineType

The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigWorkerConfigOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigWorkerConfigOutput) NumInstances

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutput

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutputWithContext

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigPtrInput

type ClusterClusterConfigWorkerConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput
	ToClusterClusterConfigWorkerConfigPtrOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigPtrOutput
}

type ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigPtrOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigWorkerConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigWorkerConfigPtrOutput) Elem

func (ClusterClusterConfigWorkerConfigPtrOutput) ElementType

func (ClusterClusterConfigWorkerConfigPtrOutput) ImageUri

The URI for the image to use for this worker. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigWorkerConfigPtrOutput) InstanceNames

func (ClusterClusterConfigWorkerConfigPtrOutput) MachineType

The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigWorkerConfigPtrOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigWorkerConfigPtrOutput) NumInstances

func (ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterIAMBinding

type ClusterIAMBinding struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringOutput                 `pulumi:"cluster"`
	Condition ClusterIAMBindingConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag    pulumi.StringOutput      `pulumi:"etag"`
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_cluster_iam.html.markdown.
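
A minimal sketch of an authoritative per-role binding. The cluster name, region, and member are placeholders, and the import paths are assumed to match this module version.

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant roles/editor on an existing cluster to a single user. Any other
		// members already holding roles/editor on this cluster would be replaced.
		_, err := dataproc.NewClusterIAMBinding(ctx, "editor-binding", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("my-cluster"),
			Region:  pulumi.String("us-central1"),
			Role:    pulumi.String("roles/editor"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
		})
		return err
	})
}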

func GetClusterIAMBinding

func GetClusterIAMBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMBindingState, opts ...pulumi.ResourceOption) (*ClusterIAMBinding, error)

GetClusterIAMBinding gets an existing ClusterIAMBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewClusterIAMBinding

func NewClusterIAMBinding(ctx *pulumi.Context,
	name string, args *ClusterIAMBindingArgs, opts ...pulumi.ResourceOption) (*ClusterIAMBinding, error)

NewClusterIAMBinding registers a new resource with the given unique name, arguments, and options.

type ClusterIAMBindingArgs

type ClusterIAMBindingArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringInput
	Condition ClusterIAMBindingConditionPtrInput
	Members   pulumi.StringArrayInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a ClusterIAMBinding resource.

func (ClusterIAMBindingArgs) ElementType

func (ClusterIAMBindingArgs) ElementType() reflect.Type

type ClusterIAMBindingCondition

type ClusterIAMBindingCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type ClusterIAMBindingConditionArgs

type ClusterIAMBindingConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (ClusterIAMBindingConditionArgs) ElementType

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutput

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutputWithContext

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutputWithContext(ctx context.Context) ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutput

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutputWithContext

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionInput

type ClusterIAMBindingConditionInput interface {
	pulumi.Input

	ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput
	ToClusterIAMBindingConditionOutputWithContext(context.Context) ClusterIAMBindingConditionOutput
}

type ClusterIAMBindingConditionOutput

type ClusterIAMBindingConditionOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingConditionOutput) Description

func (ClusterIAMBindingConditionOutput) ElementType

func (ClusterIAMBindingConditionOutput) Expression

func (ClusterIAMBindingConditionOutput) Title

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutput

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutputWithContext

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutputWithContext(ctx context.Context) ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutput

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutputWithContext

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionPtrInput

type ClusterIAMBindingConditionPtrInput interface {
	pulumi.Input

	ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput
	ToClusterIAMBindingConditionPtrOutputWithContext(context.Context) ClusterIAMBindingConditionPtrOutput
}

type ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingConditionPtrOutput) Description

func (ClusterIAMBindingConditionPtrOutput) Elem

func (ClusterIAMBindingConditionPtrOutput) ElementType

func (ClusterIAMBindingConditionPtrOutput) Expression

func (ClusterIAMBindingConditionPtrOutput) Title

func (ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutput

func (o ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutputWithContext

func (o ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingState

type ClusterIAMBindingState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringPtrInput
	Condition ClusterIAMBindingConditionPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag    pulumi.StringPtrInput
	Members pulumi.StringArrayInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (ClusterIAMBindingState) ElementType

func (ClusterIAMBindingState) ElementType() reflect.Type

type ClusterIAMMember

type ClusterIAMMember struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringOutput                `pulumi:"cluster"`
	Condition ClusterIAMMemberConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag   pulumi.StringOutput `pulumi:"etag"`
	Member pulumi.StringOutput `pulumi:"member"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_cluster_iam.html.markdown.
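
A minimal sketch of a non-authoritative grant for one member; existing members of the role are left untouched. The cluster name, region, and member are placeholders, and import paths are assumed for this module version.

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Add one viewer to the cluster's IAM policy without disturbing other members.
		_, err := dataproc.NewClusterIAMMember(ctx, "viewer-member", &dataproc.ClusterIAMMemberArgs{
			Cluster: pulumi.String("my-cluster"),
			Region:  pulumi.String("us-central1"),
			Role:    pulumi.String("roles/viewer"),
			Member:  pulumi.String("user:jane@example.com"),
		})
		return err
	})
}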

func GetClusterIAMMember

func GetClusterIAMMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMMemberState, opts ...pulumi.ResourceOption) (*ClusterIAMMember, error)

GetClusterIAMMember gets an existing ClusterIAMMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewClusterIAMMember

func NewClusterIAMMember(ctx *pulumi.Context,
	name string, args *ClusterIAMMemberArgs, opts ...pulumi.ResourceOption) (*ClusterIAMMember, error)

NewClusterIAMMember registers a new resource with the given unique name, arguments, and options.

type ClusterIAMMemberArgs

type ClusterIAMMemberArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringInput
	Condition ClusterIAMMemberConditionPtrInput
	Member    pulumi.StringInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a ClusterIAMMember resource.

func (ClusterIAMMemberArgs) ElementType

func (ClusterIAMMemberArgs) ElementType() reflect.Type

type ClusterIAMMemberCondition

type ClusterIAMMemberCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type ClusterIAMMemberConditionArgs

type ClusterIAMMemberConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (ClusterIAMMemberConditionArgs) ElementType

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutput

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutputWithContext

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutputWithContext(ctx context.Context) ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutput

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutputWithContext

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionInput

type ClusterIAMMemberConditionInput interface {
	pulumi.Input

	ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput
	ToClusterIAMMemberConditionOutputWithContext(context.Context) ClusterIAMMemberConditionOutput
}

type ClusterIAMMemberConditionOutput

type ClusterIAMMemberConditionOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberConditionOutput) Description

func (ClusterIAMMemberConditionOutput) ElementType

func (ClusterIAMMemberConditionOutput) Expression

func (ClusterIAMMemberConditionOutput) Title

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutput

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutputWithContext

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutputWithContext(ctx context.Context) ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutput

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutputWithContext

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionPtrInput

type ClusterIAMMemberConditionPtrInput interface {
	pulumi.Input

	ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput
	ToClusterIAMMemberConditionPtrOutputWithContext(context.Context) ClusterIAMMemberConditionPtrOutput
}

type ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberConditionPtrOutput) Description

func (ClusterIAMMemberConditionPtrOutput) Elem

func (ClusterIAMMemberConditionPtrOutput) ElementType

func (ClusterIAMMemberConditionPtrOutput) Expression

func (ClusterIAMMemberConditionPtrOutput) Title

func (ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutput

func (o ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutputWithContext

func (o ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberState

type ClusterIAMMemberState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringPtrInput
	Condition ClusterIAMMemberConditionPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag   pulumi.StringPtrInput
	Member pulumi.StringPtrInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (ClusterIAMMemberState) ElementType

func (ClusterIAMMemberState) ElementType() reflect.Type

type ClusterIAMPolicy

type ClusterIAMPolicy struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringOutput `pulumi:"cluster"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// The policy data generated by an `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringOutput `pulumi:"policyData"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_cluster_iam.html.markdown.
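
A hedged sketch of setting the full policy. PolicyData would normally come from an `organizations.getIAMPolicy` data source; an inline JSON document of the same shape is used here only to keep the example self-contained. The cluster name, region, member, and import paths are placeholders or assumptions.

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// An authoritative policy: anything not listed here is removed from the cluster.
		policyJSON := `{"bindings":[{"role":"roles/editor","members":["user:jane@example.com"]}]}`

		_, err := dataproc.NewClusterIAMPolicy(ctx, "cluster-policy", &dataproc.ClusterIAMPolicyArgs{
			Cluster:    pulumi.String("my-cluster"),
			Region:     pulumi.String("us-central1"),
			PolicyData: pulumi.String(policyJSON),
		})
		return err
	})
}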

func GetClusterIAMPolicy

func GetClusterIAMPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMPolicyState, opts ...pulumi.ResourceOption) (*ClusterIAMPolicy, error)

GetClusterIAMPolicy gets an existing ClusterIAMPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewClusterIAMPolicy

func NewClusterIAMPolicy(ctx *pulumi.Context,
	name string, args *ClusterIAMPolicyArgs, opts ...pulumi.ResourceOption) (*ClusterIAMPolicy, error)

NewClusterIAMPolicy registers a new resource with the given unique name, arguments, and options.

type ClusterIAMPolicyArgs

type ClusterIAMPolicyArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringInput
	// The policy data generated by an `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a ClusterIAMPolicy resource.

func (ClusterIAMPolicyArgs) ElementType

func (ClusterIAMPolicyArgs) ElementType() reflect.Type

type ClusterIAMPolicyState

type ClusterIAMPolicyState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag pulumi.StringPtrInput
	// The policy data generated by an `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringPtrInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

func (ClusterIAMPolicyState) ElementType

func (ClusterIAMPolicyState) ElementType() reflect.Type

type ClusterState

type ClusterState struct {
	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigPtrInput
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapInput
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringPtrInput
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrInput
}

func (ClusterState) ElementType

func (ClusterState) ElementType() reflect.Type

type Job

type Job struct {
	pulumi.CustomResourceState

	// If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
	DriverControlsFilesUri pulumi.StringOutput `pulumi:"driverControlsFilesUri"`
	// A URI pointing to the location of the stdout of the job's driver program.
	DriverOutputResourceUri pulumi.StringOutput `pulumi:"driverOutputResourceUri"`
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete  pulumi.BoolPtrOutput     `pulumi:"forceDelete"`
	HadoopConfig JobHadoopConfigPtrOutput `pulumi:"hadoopConfig"`
	HiveConfig   JobHiveConfigPtrOutput   `pulumi:"hiveConfig"`
	// The list of labels (key/value pairs) to add to the job.
	Labels    pulumi.StringMapOutput `pulumi:"labels"`
	PigConfig JobPigConfigPtrOutput  `pulumi:"pigConfig"`
	Placement JobPlacementOutput     `pulumi:"placement"`
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project       pulumi.StringOutput       `pulumi:"project"`
	PysparkConfig JobPysparkConfigPtrOutput `pulumi:"pysparkConfig"`
	Reference     JobReferenceOutput        `pulumi:"reference"`
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrOutput `pulumi:"region"`
	// Optional. Job scheduling configuration.
	Scheduling     JobSchedulingPtrOutput     `pulumi:"scheduling"`
	SparkConfig    JobSparkConfigPtrOutput    `pulumi:"sparkConfig"`
	SparksqlConfig JobSparksqlConfigPtrOutput `pulumi:"sparksqlConfig"`
	Status         JobStatusOutput            `pulumi:"status"`
}

Manages a job resource within a Dataproc cluster running on Google Compute Engine. For more information, see [the official dataproc documentation](https://cloud.google.com/dataproc/).

!> **Note:** This resource does not support 'update' and changing any attributes will cause the resource to be recreated.

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_job.html.markdown.
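
As a rough, hedged illustration, submitting a PySpark job to an existing cluster might look like the following; the cluster name, region, and script URI are placeholders, and the import paths assume the v2 module layout:

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Submit a PySpark job to an existing cluster. ForceDelete lets
		// `pulumi destroy` cancel the job if it is still running.
		_, err := dataproc.NewJob(ctx, "pysparkJob", &dataproc.JobArgs{
			Region:      pulumi.String("us-central1"),
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			PysparkConfig: &dataproc.JobPysparkConfigArgs{
				MainPythonFileUri: pulumi.String("gs://my-bucket/jobs/wordcount.py"),
			},
		})
		return err
	})
}

Because the resource does not support updates, changing any of these attributes causes the job to be recreated on the next deployment.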

func GetJob

func GetJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobState, opts ...pulumi.ResourceOption) (*Job, error)

GetJob gets an existing Job resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJob

func NewJob(ctx *pulumi.Context,
	name string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error)

NewJob registers a new resource with the given unique name, arguments, and options.

type JobArgs

type JobArgs struct {
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete  pulumi.BoolPtrInput
	HadoopConfig JobHadoopConfigPtrInput
	HiveConfig   JobHiveConfigPtrInput
	// The list of labels (key/value pairs) to add to the job.
	Labels    pulumi.StringMapInput
	PigConfig JobPigConfigPtrInput
	Placement JobPlacementInput
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project       pulumi.StringPtrInput
	PysparkConfig JobPysparkConfigPtrInput
	Reference     JobReferencePtrInput
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrInput
	// Optional. Job scheduling configuration.
	Scheduling     JobSchedulingPtrInput
	SparkConfig    JobSparkConfigPtrInput
	SparksqlConfig JobSparksqlConfigPtrInput
}

The set of arguments for constructing a Job resource.

func (JobArgs) ElementType

func (JobArgs) ElementType() reflect.Type

type JobHadoopConfig

type JobHadoopConfig struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                      `pulumi:"jarFileUris"`
	LoggingConfig *JobHadoopConfigLoggingConfig `pulumi:"loggingConfig"`
	MainClass     *string                       `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
}

type JobHadoopConfigArgs

type JobHadoopConfigArgs struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput              `pulumi:"jarFileUris"`
	LoggingConfig JobHadoopConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	MainClass     pulumi.StringPtrInput                `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}
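
A hedged fragment showing how these arguments might be populated; it reuses one of the example URIs from the MainJarFileUri documentation above, the bucket paths are placeholders, and the value is meant to be assigned to the HadoopConfig field of dataproc.JobArgs (as in the job sketch shown earlier, whose imports it assumes):

hadoopConfig := &dataproc.JobHadoopConfigArgs{
	// Run the word-count class from the bundled examples jar.
	MainJarFileUri: pulumi.String("file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"),
	Args: pulumi.StringArray{
		pulumi.String("wordcount"),
		pulumi.String("gs://my-bucket/input/"),
		pulumi.String("gs://my-bucket/output/"),
	},
	LoggingConfig: &dataproc.JobHadoopConfigLoggingConfigArgs{
		DriverLogLevels: pulumi.StringMap{"root": pulumi.String("INFO")},
	},
}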

func (JobHadoopConfigArgs) ElementType

func (JobHadoopConfigArgs) ElementType() reflect.Type

func (JobHadoopConfigArgs) ToJobHadoopConfigOutput

func (i JobHadoopConfigArgs) ToJobHadoopConfigOutput() JobHadoopConfigOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigOutputWithContext

func (i JobHadoopConfigArgs) ToJobHadoopConfigOutputWithContext(ctx context.Context) JobHadoopConfigOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigPtrOutput

func (i JobHadoopConfigArgs) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigPtrOutputWithContext

func (i JobHadoopConfigArgs) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHadoopConfigInput

type JobHadoopConfigInput interface {
	pulumi.Input

	ToJobHadoopConfigOutput() JobHadoopConfigOutput
	ToJobHadoopConfigOutputWithContext(context.Context) JobHadoopConfigOutput
}

type JobHadoopConfigLoggingConfig

type JobHadoopConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobHadoopConfigLoggingConfigArgs

type JobHadoopConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobHadoopConfigLoggingConfigArgs) ElementType

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutput

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutputWithContext

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutput

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigInput

type JobHadoopConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput
	ToJobHadoopConfigLoggingConfigOutputWithContext(context.Context) JobHadoopConfigLoggingConfigOutput
}

type JobHadoopConfigLoggingConfigOutput

type JobHadoopConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigLoggingConfigOutput) DriverLogLevels

func (JobHadoopConfigLoggingConfigOutput) ElementType

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutput

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutputWithContext

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutput

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigPtrInput

type JobHadoopConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput
	ToJobHadoopConfigLoggingConfigPtrOutputWithContext(context.Context) JobHadoopConfigLoggingConfigPtrOutput
}

type JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobHadoopConfigLoggingConfigPtrOutput) Elem

func (JobHadoopConfigLoggingConfigPtrOutput) ElementType

func (JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutput

func (o JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (o JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigOutput

type JobHadoopConfigOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobHadoopConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobHadoopConfigOutput) ElementType

func (JobHadoopConfigOutput) ElementType() reflect.Type

func (JobHadoopConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobHadoopConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobHadoopConfigOutput) LoggingConfig

func (JobHadoopConfigOutput) MainClass

func (JobHadoopConfigOutput) MainJarFileUri

func (o JobHadoopConfigOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobHadoopConfigOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHadoopConfigOutput) ToJobHadoopConfigOutput

func (o JobHadoopConfigOutput) ToJobHadoopConfigOutput() JobHadoopConfigOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigOutputWithContext

func (o JobHadoopConfigOutput) ToJobHadoopConfigOutputWithContext(ctx context.Context) JobHadoopConfigOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigPtrOutput

func (o JobHadoopConfigOutput) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigPtrOutputWithContext

func (o JobHadoopConfigOutput) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHadoopConfigPtrInput

type JobHadoopConfigPtrInput interface {
	pulumi.Input

	ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput
	ToJobHadoopConfigPtrOutputWithContext(context.Context) JobHadoopConfigPtrOutput
}

type JobHadoopConfigPtrOutput

type JobHadoopConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobHadoopConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobHadoopConfigPtrOutput) Elem

func (JobHadoopConfigPtrOutput) ElementType

func (JobHadoopConfigPtrOutput) ElementType() reflect.Type

func (JobHadoopConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobHadoopConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobHadoopConfigPtrOutput) LoggingConfig

func (JobHadoopConfigPtrOutput) MainClass

func (JobHadoopConfigPtrOutput) MainJarFileUri

func (o JobHadoopConfigPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobHadoopConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutput

func (o JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutputWithContext

func (o JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHiveConfig

type JobHiveConfig struct {
	// Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`.
	QueryFileUri *string  `pulumi:"queryFileUri"`
	QueryLists   []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

type JobHiveConfigArgs

type JobHiveConfigArgs struct {
	// Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`.
	QueryFileUri pulumi.StringPtrInput   `pulumi:"queryFileUri"`
	QueryLists   pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}
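
A hedged fragment of a Hive configuration that could be assigned to the HiveConfig field of dataproc.JobArgs (imports as in the job sketch earlier; the queries and property are illustrative placeholders):

hiveConfig := &dataproc.JobHiveConfigArgs{
	// Keep running the remaining queries even if one of them fails.
	ContinueOnFailure: pulumi.Bool(true),
	QueryLists: pulumi.StringArray{
		pulumi.String("SHOW DATABASES"),
		pulumi.String("CREATE DATABASE IF NOT EXISTS analytics"),
	},
	Properties: pulumi.StringMap{
		"hive.exec.dynamic.partition": pulumi.String("true"),
	},
}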

func (JobHiveConfigArgs) ElementType

func (JobHiveConfigArgs) ElementType() reflect.Type

func (JobHiveConfigArgs) ToJobHiveConfigOutput

func (i JobHiveConfigArgs) ToJobHiveConfigOutput() JobHiveConfigOutput

func (JobHiveConfigArgs) ToJobHiveConfigOutputWithContext

func (i JobHiveConfigArgs) ToJobHiveConfigOutputWithContext(ctx context.Context) JobHiveConfigOutput

func (JobHiveConfigArgs) ToJobHiveConfigPtrOutput

func (i JobHiveConfigArgs) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigArgs) ToJobHiveConfigPtrOutputWithContext

func (i JobHiveConfigArgs) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobHiveConfigInput

type JobHiveConfigInput interface {
	pulumi.Input

	ToJobHiveConfigOutput() JobHiveConfigOutput
	ToJobHiveConfigOutputWithContext(context.Context) JobHiveConfigOutput
}

type JobHiveConfigOutput

type JobHiveConfigOutput struct{ *pulumi.OutputState }

func (JobHiveConfigOutput) ContinueOnFailure

func (o JobHiveConfigOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.

func (JobHiveConfigOutput) ElementType

func (JobHiveConfigOutput) ElementType() reflect.Type

func (JobHiveConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobHiveConfigOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHiveConfigOutput) QueryFileUri

func (o JobHiveConfigOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`.

func (JobHiveConfigOutput) QueryLists

func (JobHiveConfigOutput) ScriptVariables

func (o JobHiveConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobHiveConfigOutput) ToJobHiveConfigOutput

func (o JobHiveConfigOutput) ToJobHiveConfigOutput() JobHiveConfigOutput

func (JobHiveConfigOutput) ToJobHiveConfigOutputWithContext

func (o JobHiveConfigOutput) ToJobHiveConfigOutputWithContext(ctx context.Context) JobHiveConfigOutput

func (JobHiveConfigOutput) ToJobHiveConfigPtrOutput

func (o JobHiveConfigOutput) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigOutput) ToJobHiveConfigPtrOutputWithContext

func (o JobHiveConfigOutput) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobHiveConfigPtrInput

type JobHiveConfigPtrInput interface {
	pulumi.Input

	ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput
	ToJobHiveConfigPtrOutputWithContext(context.Context) JobHiveConfigPtrOutput
}

type JobHiveConfigPtrOutput

type JobHiveConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHiveConfigPtrOutput) ContinueOnFailure

func (o JobHiveConfigPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.

func (JobHiveConfigPtrOutput) Elem

func (JobHiveConfigPtrOutput) ElementType

func (JobHiveConfigPtrOutput) ElementType() reflect.Type

func (JobHiveConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobHiveConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHiveConfigPtrOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`.

func (JobHiveConfigPtrOutput) QueryLists

func (JobHiveConfigPtrOutput) ScriptVariables

func (o JobHiveConfigPtrOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutput

func (o JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutputWithContext

func (o JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobIAMBinding

type JobIAMBinding struct {
	pulumi.CustomResourceState

	Condition JobIAMBindingConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the job's IAM policy.
	Etag    pulumi.StringOutput      `pulumi:"etag"`
	JobId   pulumi.StringOutput      `pulumi:"jobId"`
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_job_iam.html.markdown.
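
A hedged sketch of authoritatively managing the members of one role on a job; the job ID, region, and member are placeholders, and import paths assume the v2 module layout:

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// This binding owns roles/editor on the job: members not listed
		// here are removed from that role, while other roles are preserved.
		_, err := dataproc.NewJobIAMBinding(ctx, "editors", &dataproc.JobIAMBindingArgs{
			JobId:  pulumi.String("your-dataproc-job-id"),
			Region: pulumi.String("us-central1"),
			Role:   pulumi.String("roles/editor"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
		})
		return err
	})
}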

func GetJobIAMBinding

func GetJobIAMBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMBindingState, opts ...pulumi.ResourceOption) (*JobIAMBinding, error)

GetJobIAMBinding gets an existing JobIAMBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMBinding

func NewJobIAMBinding(ctx *pulumi.Context,
	name string, args *JobIAMBindingArgs, opts ...pulumi.ResourceOption) (*JobIAMBinding, error)

NewJobIAMBinding registers a new resource with the given unique name, arguments, and options.

type JobIAMBindingArgs

type JobIAMBindingArgs struct {
	Condition JobIAMBindingConditionPtrInput
	JobId     pulumi.StringInput
	Members   pulumi.StringArrayInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a JobIAMBinding resource.

func (JobIAMBindingArgs) ElementType

func (JobIAMBindingArgs) ElementType() reflect.Type

type JobIAMBindingCondition

type JobIAMBindingCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type JobIAMBindingConditionArgs

type JobIAMBindingConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (JobIAMBindingConditionArgs) ElementType

func (JobIAMBindingConditionArgs) ElementType() reflect.Type

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutput

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutputWithContext

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutputWithContext(ctx context.Context) JobIAMBindingConditionOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutput

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutputWithContext

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionInput

type JobIAMBindingConditionInput interface {
	pulumi.Input

	ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput
	ToJobIAMBindingConditionOutputWithContext(context.Context) JobIAMBindingConditionOutput
}

type JobIAMBindingConditionOutput

type JobIAMBindingConditionOutput struct{ *pulumi.OutputState }

func (JobIAMBindingConditionOutput) Description

func (JobIAMBindingConditionOutput) ElementType

func (JobIAMBindingConditionOutput) Expression

func (JobIAMBindingConditionOutput) Title

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutput

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutputWithContext

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutputWithContext(ctx context.Context) JobIAMBindingConditionOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutput

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutputWithContext

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionPtrInput

type JobIAMBindingConditionPtrInput interface {
	pulumi.Input

	ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput
	ToJobIAMBindingConditionPtrOutputWithContext(context.Context) JobIAMBindingConditionPtrOutput
}

type JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionPtrOutput struct{ *pulumi.OutputState }

func (JobIAMBindingConditionPtrOutput) Description

func (JobIAMBindingConditionPtrOutput) Elem

func (JobIAMBindingConditionPtrOutput) ElementType

func (JobIAMBindingConditionPtrOutput) Expression

func (JobIAMBindingConditionPtrOutput) Title

func (JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutput

func (o JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutputWithContext

func (o JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingState

type JobIAMBindingState struct {
	Condition JobIAMBindingConditionPtrInput
	// (Computed) The etag of the job's IAM policy.
	Etag    pulumi.StringPtrInput
	JobId   pulumi.StringPtrInput
	Members pulumi.StringArrayInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (JobIAMBindingState) ElementType

func (JobIAMBindingState) ElementType() reflect.Type

type JobIAMMember

type JobIAMMember struct {
	pulumi.CustomResourceState

	Condition JobIAMMemberConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the job's IAM policy.
	Etag   pulumi.StringOutput `pulumi:"etag"`
	JobId  pulumi.StringOutput `pulumi:"jobId"`
	Member pulumi.StringOutput `pulumi:"member"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_job_iam.html.markdown.
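
Compared with the binding sketch above, the member resource grants a single principal without taking ownership of the rest of the role's members. A hedged sketch, with the job ID, region, and service account as placeholders and import paths assuming the v2 module layout:

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Add one viewer to the job's IAM policy; existing members of
		// roles/viewer and all other roles are preserved.
		_, err := dataproc.NewJobIAMMember(ctx, "viewer", &dataproc.JobIAMMemberArgs{
			JobId:  pulumi.String("your-dataproc-job-id"),
			Region: pulumi.String("us-central1"),
			Role:   pulumi.String("roles/viewer"),
			Member: pulumi.String("serviceAccount:ci@my-project.iam.gserviceaccount.com"),
		})
		return err
	})
}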

func GetJobIAMMember

func GetJobIAMMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMMemberState, opts ...pulumi.ResourceOption) (*JobIAMMember, error)

GetJobIAMMember gets an existing JobIAMMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMMember

func NewJobIAMMember(ctx *pulumi.Context,
	name string, args *JobIAMMemberArgs, opts ...pulumi.ResourceOption) (*JobIAMMember, error)

NewJobIAMMember registers a new resource with the given unique name, arguments, and options.

type JobIAMMemberArgs

type JobIAMMemberArgs struct {
	Condition JobIAMMemberConditionPtrInput
	JobId     pulumi.StringInput
	Member    pulumi.StringInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a JobIAMMember resource.

func (JobIAMMemberArgs) ElementType

func (JobIAMMemberArgs) ElementType() reflect.Type

type JobIAMMemberCondition

type JobIAMMemberCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type JobIAMMemberConditionArgs

type JobIAMMemberConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (JobIAMMemberConditionArgs) ElementType

func (JobIAMMemberConditionArgs) ElementType() reflect.Type

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutput

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutputWithContext

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutputWithContext(ctx context.Context) JobIAMMemberConditionOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutput

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutputWithContext

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionInput

type JobIAMMemberConditionInput interface {
	pulumi.Input

	ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput
	ToJobIAMMemberConditionOutputWithContext(context.Context) JobIAMMemberConditionOutput
}

type JobIAMMemberConditionOutput

type JobIAMMemberConditionOutput struct{ *pulumi.OutputState }

func (JobIAMMemberConditionOutput) Description

func (JobIAMMemberConditionOutput) ElementType

func (JobIAMMemberConditionOutput) Expression

func (JobIAMMemberConditionOutput) Title

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutput

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutputWithContext

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutputWithContext(ctx context.Context) JobIAMMemberConditionOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutput

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutputWithContext

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionPtrInput

type JobIAMMemberConditionPtrInput interface {
	pulumi.Input

	ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput
	ToJobIAMMemberConditionPtrOutputWithContext(context.Context) JobIAMMemberConditionPtrOutput
}

type JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionPtrOutput struct{ *pulumi.OutputState }

func (JobIAMMemberConditionPtrOutput) Description

func (JobIAMMemberConditionPtrOutput) Elem

func (JobIAMMemberConditionPtrOutput) ElementType

func (JobIAMMemberConditionPtrOutput) Expression

func (JobIAMMemberConditionPtrOutput) Title

func (JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutput

func (o JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutputWithContext

func (o JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberState

type JobIAMMemberState struct {
	Condition JobIAMMemberConditionPtrInput
	// (Computed) The etag of the job's IAM policy.
	Etag   pulumi.StringPtrInput
	JobId  pulumi.StringPtrInput
	Member pulumi.StringPtrInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (JobIAMMemberState) ElementType

func (JobIAMMemberState) ElementType() reflect.Type

type JobIAMPolicy

type JobIAMPolicy struct {
	pulumi.CustomResourceState

	// (Computed) The etag of the job's IAM policy.
	Etag  pulumi.StringOutput `pulumi:"etag"`
	JobId pulumi.StringOutput `pulumi:"jobId"`
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringOutput `pulumi:"policyData"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/dataproc_job_iam.html.markdown.
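
A hedged sketch of the authoritative variant. PolicyData is normally produced by the `organizations.getIAMPolicy` data source mentioned in the argument documentation; a literal JSON document of the same general shape is inlined here only to keep the sketch self-contained, and the job ID, region, and member are placeholders:

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Replace the job's entire IAM policy with this single binding.
		policy := `{"bindings":[{"role":"roles/editor","members":["user:jane@example.com"]}]}`
		_, err := dataproc.NewJobIAMPolicy(ctx, "policy", &dataproc.JobIAMPolicyArgs{
			JobId:      pulumi.String("your-dataproc-job-id"),
			Region:     pulumi.String("us-central1"),
			PolicyData: pulumi.String(policy),
		})
		return err
	})
}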

func GetJobIAMPolicy

func GetJobIAMPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMPolicyState, opts ...pulumi.ResourceOption) (*JobIAMPolicy, error)

GetJobIAMPolicy gets an existing JobIAMPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMPolicy

func NewJobIAMPolicy(ctx *pulumi.Context,
	name string, args *JobIAMPolicyArgs, opts ...pulumi.ResourceOption) (*JobIAMPolicy, error)

NewJobIAMPolicy registers a new resource with the given unique name, arguments, and options.

type JobIAMPolicyArgs

type JobIAMPolicyArgs struct {
	JobId pulumi.StringInput
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a JobIAMPolicy resource.

func (JobIAMPolicyArgs) ElementType

func (JobIAMPolicyArgs) ElementType() reflect.Type

type JobIAMPolicyState

type JobIAMPolicyState struct {
	// (Computed) The etag of the job's IAM policy.
	Etag  pulumi.StringPtrInput
	JobId pulumi.StringPtrInput
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringPtrInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

func (JobIAMPolicyState) ElementType

func (JobIAMPolicyState) ElementType() reflect.Type

type JobPigConfig

type JobPigConfig struct {
	// Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                   `pulumi:"jarFileUris"`
	LoggingConfig *JobPigConfigLoggingConfig `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`.
	QueryFileUri *string  `pulumi:"queryFileUri"`
	QueryLists   []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

type JobPigConfigArgs

type JobPigConfigArgs struct {
	// Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput           `pulumi:"jarFileUris"`
	LoggingConfig JobPigConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`.
	QueryFileUri pulumi.StringPtrInput   `pulumi:"queryFileUri"`
	QueryLists   pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}
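
A hedged fragment of a Pig configuration that could be assigned to the PigConfig field of dataproc.JobArgs (imports as in the job sketch earlier; the script URI and property are illustrative placeholders):

pigConfig := &dataproc.JobPigConfigArgs{
	// Run a Pig script stored in Cloud Storage.
	QueryFileUri: pulumi.String("gs://my-bucket/scripts/cleanup.pig"),
	Properties: pulumi.StringMap{
		"mapred.compress.map.output": pulumi.String("true"),
	},
	LoggingConfig: &dataproc.JobPigConfigLoggingConfigArgs{
		DriverLogLevels: pulumi.StringMap{"root": pulumi.String("INFO")},
	},
}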

func (JobPigConfigArgs) ElementType

func (JobPigConfigArgs) ElementType() reflect.Type

func (JobPigConfigArgs) ToJobPigConfigOutput

func (i JobPigConfigArgs) ToJobPigConfigOutput() JobPigConfigOutput

func (JobPigConfigArgs) ToJobPigConfigOutputWithContext

func (i JobPigConfigArgs) ToJobPigConfigOutputWithContext(ctx context.Context) JobPigConfigOutput

func (JobPigConfigArgs) ToJobPigConfigPtrOutput

func (i JobPigConfigArgs) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigArgs) ToJobPigConfigPtrOutputWithContext

func (i JobPigConfigArgs) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPigConfigInput

type JobPigConfigInput interface {
	pulumi.Input

	ToJobPigConfigOutput() JobPigConfigOutput
	ToJobPigConfigOutputWithContext(context.Context) JobPigConfigOutput
}

type JobPigConfigLoggingConfig

type JobPigConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobPigConfigLoggingConfigArgs

type JobPigConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobPigConfigLoggingConfigArgs) ElementType

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutput

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutputWithContext

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutput

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigInput

type JobPigConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput
	ToJobPigConfigLoggingConfigOutputWithContext(context.Context) JobPigConfigLoggingConfigOutput
}

type JobPigConfigLoggingConfigOutput

type JobPigConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobPigConfigLoggingConfigOutput) DriverLogLevels

func (JobPigConfigLoggingConfigOutput) ElementType

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutput

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutputWithContext

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutput

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigPtrInput

type JobPigConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput
	ToJobPigConfigLoggingConfigPtrOutputWithContext(context.Context) JobPigConfigLoggingConfigPtrOutput
}

type JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPigConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobPigConfigLoggingConfigPtrOutput) Elem

func (JobPigConfigLoggingConfigPtrOutput) ElementType

func (JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutput

func (o JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (o JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigOutput

type JobPigConfigOutput struct{ *pulumi.OutputState }

func (JobPigConfigOutput) ContinueOnFailure

func (o JobPigConfigOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.

func (JobPigConfigOutput) ElementType

func (JobPigConfigOutput) ElementType() reflect.Type

func (JobPigConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPigConfigOutput) LoggingConfig

func (JobPigConfigOutput) Properties

func (o JobPigConfigOutput) Properties() pulumi.StringMapOutput

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPigConfigOutput) QueryFileUri

func (o JobPigConfigOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`.

func (JobPigConfigOutput) QueryLists

func (JobPigConfigOutput) ScriptVariables

func (o JobPigConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobPigConfigOutput) ToJobPigConfigOutput

func (o JobPigConfigOutput) ToJobPigConfigOutput() JobPigConfigOutput

func (JobPigConfigOutput) ToJobPigConfigOutputWithContext

func (o JobPigConfigOutput) ToJobPigConfigOutputWithContext(ctx context.Context) JobPigConfigOutput

func (JobPigConfigOutput) ToJobPigConfigPtrOutput

func (o JobPigConfigOutput) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigOutput) ToJobPigConfigPtrOutputWithContext

func (o JobPigConfigOutput) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPigConfigPtrInput

type JobPigConfigPtrInput interface {
	pulumi.Input

	ToJobPigConfigPtrOutput() JobPigConfigPtrOutput
	ToJobPigConfigPtrOutputWithContext(context.Context) JobPigConfigPtrOutput
}

type JobPigConfigPtrOutput

type JobPigConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPigConfigPtrOutput) ContinueOnFailure

func (o JobPigConfigPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.

func (JobPigConfigPtrOutput) Elem

func (JobPigConfigPtrOutput) ElementType

func (JobPigConfigPtrOutput) ElementType() reflect.Type

func (JobPigConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPigConfigPtrOutput) LoggingConfig

func (JobPigConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPigConfigPtrOutput) QueryFileUri

func (o JobPigConfigPtrOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`.

func (JobPigConfigPtrOutput) QueryLists

func (JobPigConfigPtrOutput) ScriptVariables

func (o JobPigConfigPtrOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobPigConfigPtrOutput) ToJobPigConfigPtrOutput

func (o JobPigConfigPtrOutput) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigPtrOutput) ToJobPigConfigPtrOutputWithContext

func (o JobPigConfigPtrOutput) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPlacement

type JobPlacement struct {
	ClusterName string  `pulumi:"clusterName"`
	ClusterUuid *string `pulumi:"clusterUuid"`
}

type JobPlacementArgs

type JobPlacementArgs struct {
	ClusterName pulumi.StringInput    `pulumi:"clusterName"`
	ClusterUuid pulumi.StringPtrInput `pulumi:"clusterUuid"`
}

func (JobPlacementArgs) ElementType

func (JobPlacementArgs) ElementType() reflect.Type

func (JobPlacementArgs) ToJobPlacementOutput

func (i JobPlacementArgs) ToJobPlacementOutput() JobPlacementOutput

func (JobPlacementArgs) ToJobPlacementOutputWithContext

func (i JobPlacementArgs) ToJobPlacementOutputWithContext(ctx context.Context) JobPlacementOutput

func (JobPlacementArgs) ToJobPlacementPtrOutput

func (i JobPlacementArgs) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementArgs) ToJobPlacementPtrOutputWithContext

func (i JobPlacementArgs) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPlacementInput

type JobPlacementInput interface {
	pulumi.Input

	ToJobPlacementOutput() JobPlacementOutput
	ToJobPlacementOutputWithContext(context.Context) JobPlacementOutput
}

type JobPlacementOutput

type JobPlacementOutput struct{ *pulumi.OutputState }

func (JobPlacementOutput) ClusterName

func (o JobPlacementOutput) ClusterName() pulumi.StringOutput

func (JobPlacementOutput) ClusterUuid

func (o JobPlacementOutput) ClusterUuid() pulumi.StringPtrOutput

func (JobPlacementOutput) ElementType

func (JobPlacementOutput) ElementType() reflect.Type

func (JobPlacementOutput) ToJobPlacementOutput

func (o JobPlacementOutput) ToJobPlacementOutput() JobPlacementOutput

func (JobPlacementOutput) ToJobPlacementOutputWithContext

func (o JobPlacementOutput) ToJobPlacementOutputWithContext(ctx context.Context) JobPlacementOutput

func (JobPlacementOutput) ToJobPlacementPtrOutput

func (o JobPlacementOutput) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementOutput) ToJobPlacementPtrOutputWithContext

func (o JobPlacementOutput) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPlacementPtrInput

type JobPlacementPtrInput interface {
	pulumi.Input

	ToJobPlacementPtrOutput() JobPlacementPtrOutput
	ToJobPlacementPtrOutputWithContext(context.Context) JobPlacementPtrOutput
}

type JobPlacementPtrOutput

type JobPlacementPtrOutput struct{ *pulumi.OutputState }

func (JobPlacementPtrOutput) ClusterName

func (o JobPlacementPtrOutput) ClusterName() pulumi.StringOutput

func (JobPlacementPtrOutput) ClusterUuid

func (JobPlacementPtrOutput) Elem

func (JobPlacementPtrOutput) ElementType

func (JobPlacementPtrOutput) ElementType() reflect.Type

func (JobPlacementPtrOutput) ToJobPlacementPtrOutput

func (o JobPlacementPtrOutput) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementPtrOutput) ToJobPlacementPtrOutputWithContext

func (o JobPlacementPtrOutput) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPysparkConfig

type JobPysparkConfig struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris       []string                       `pulumi:"jarFileUris"`
	LoggingConfig     *JobPysparkConfigLoggingConfig `pulumi:"loggingConfig"`
	MainPythonFileUri string                         `pulumi:"mainPythonFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `pulumi:"pythonFileUris"`
}

type JobPysparkConfigArgs

type JobPysparkConfigArgs struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris       pulumi.StringArrayInput               `pulumi:"jarFileUris"`
	LoggingConfig     JobPysparkConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	MainPythonFileUri pulumi.StringInput                    `pulumi:"mainPythonFileUri"`
	// A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris pulumi.StringArrayInput `pulumi:"pythonFileUris"`
}
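
JobPlacementArgs and JobPysparkConfigArgs are typically wired together through the package's Job resource (documented elsewhere on this page). The sketch below is illustrative only: the module import paths, the region, the cluster name "my-cluster", and the gs:// path are assumptions, not values defined by this package.

package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v2/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Submit a PySpark job to an existing cluster. The placement block
		// selects the cluster; the pyspark block points at the driver script.
		_, err := dataproc.NewJob(ctx, "pysparkJob", &dataproc.JobArgs{
			Region:      pulumi.String("us-central1"),
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"), // assumed pre-existing cluster
			},
			PysparkConfig: &dataproc.JobPysparkConfigArgs{
				MainPythonFileUri: pulumi.String("gs://my-bucket/jobs/wordcount.py"), // placeholder URI
				Properties: pulumi.StringMap{
					"spark.logConf": pulumi.String("true"),
				},
				LoggingConfig: &dataproc.JobPysparkConfigLoggingConfigArgs{
					DriverLogLevels: pulumi.StringMap{
						"root": pulumi.String("INFO"),
					},
				},
			},
		})
		return err
	})
}

Because the value types implement the To*PtrOutput methods listed on this page, either a value or a pointer to an Args struct can be supplied wherever the corresponding PtrInput is expected.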

func (JobPysparkConfigArgs) ElementType

func (JobPysparkConfigArgs) ElementType() reflect.Type

func (JobPysparkConfigArgs) ToJobPysparkConfigOutput

func (i JobPysparkConfigArgs) ToJobPysparkConfigOutput() JobPysparkConfigOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigOutputWithContext

func (i JobPysparkConfigArgs) ToJobPysparkConfigOutputWithContext(ctx context.Context) JobPysparkConfigOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigPtrOutput

func (i JobPysparkConfigArgs) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigPtrOutputWithContext

func (i JobPysparkConfigArgs) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobPysparkConfigInput

type JobPysparkConfigInput interface {
	pulumi.Input

	ToJobPysparkConfigOutput() JobPysparkConfigOutput
	ToJobPysparkConfigOutputWithContext(context.Context) JobPysparkConfigOutput
}

type JobPysparkConfigLoggingConfig

type JobPysparkConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobPysparkConfigLoggingConfigArgs

type JobPysparkConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobPysparkConfigLoggingConfigArgs) ElementType

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutput

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutputWithContext

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutput

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigInput

type JobPysparkConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput
	ToJobPysparkConfigLoggingConfigOutputWithContext(context.Context) JobPysparkConfigLoggingConfigOutput
}

type JobPysparkConfigLoggingConfigOutput

type JobPysparkConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigLoggingConfigOutput) DriverLogLevels

func (JobPysparkConfigLoggingConfigOutput) ElementType

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutput

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutputWithContext

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutput

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigPtrInput

type JobPysparkConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput
	ToJobPysparkConfigLoggingConfigPtrOutputWithContext(context.Context) JobPysparkConfigLoggingConfigPtrOutput
}

type JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobPysparkConfigLoggingConfigPtrOutput) Elem

func (JobPysparkConfigLoggingConfigPtrOutput) ElementType

func (JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutput

func (o JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (o JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigOutput

type JobPysparkConfigOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobPysparkConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobPysparkConfigOutput) ElementType

func (JobPysparkConfigOutput) ElementType() reflect.Type

func (JobPysparkConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobPysparkConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPysparkConfigOutput) LoggingConfig

func (JobPysparkConfigOutput) MainPythonFileUri

func (o JobPysparkConfigOutput) MainPythonFileUri() pulumi.StringOutput

func (JobPysparkConfigOutput) Properties

A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPysparkConfigOutput) PythonFileUris

func (o JobPysparkConfigOutput) PythonFileUris() pulumi.StringArrayOutput

HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (JobPysparkConfigOutput) ToJobPysparkConfigOutput

func (o JobPysparkConfigOutput) ToJobPysparkConfigOutput() JobPysparkConfigOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigOutputWithContext

func (o JobPysparkConfigOutput) ToJobPysparkConfigOutputWithContext(ctx context.Context) JobPysparkConfigOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigPtrOutput

func (o JobPysparkConfigOutput) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigPtrOutputWithContext

func (o JobPysparkConfigOutput) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobPysparkConfigPtrInput

type JobPysparkConfigPtrInput interface {
	pulumi.Input

	ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput
	ToJobPysparkConfigPtrOutputWithContext(context.Context) JobPysparkConfigPtrOutput
}

type JobPysparkConfigPtrOutput

type JobPysparkConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobPysparkConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobPysparkConfigPtrOutput) Elem

func (JobPysparkConfigPtrOutput) ElementType

func (JobPysparkConfigPtrOutput) ElementType() reflect.Type

func (JobPysparkConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobPysparkConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPysparkConfigPtrOutput) LoggingConfig

func (JobPysparkConfigPtrOutput) MainPythonFileUri

func (o JobPysparkConfigPtrOutput) MainPythonFileUri() pulumi.StringOutput

func (JobPysparkConfigPtrOutput) Properties

A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPysparkConfigPtrOutput) PythonFileUris

HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutput

func (o JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutputWithContext

func (o JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobReference

type JobReference struct {
	JobId *string `pulumi:"jobId"`
}

type JobReferenceArgs

type JobReferenceArgs struct {
	JobId pulumi.StringPtrInput `pulumi:"jobId"`
}
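
JobReference only carries an explicit job ID. The fragment below (meant to sit inside the pulumi.Run callback of the earlier sketch; the ID is purely illustrative) shows the input-to-output conversion pattern that most To* methods on this page exist for:

ref := dataproc.JobReferenceArgs{
	JobId: pulumi.String("wordcount-2020-04"), // illustrative ID, not a real job
}

// The value type satisfies both JobReferenceInput and JobReferencePtrInput,
// so it can be converted eagerly to a pointer output...
refPtr := ref.ToJobReferencePtrOutput()

// ...and its field read back later as an output property.
_ = refPtr.JobId()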

func (JobReferenceArgs) ElementType

func (JobReferenceArgs) ElementType() reflect.Type

func (JobReferenceArgs) ToJobReferenceOutput

func (i JobReferenceArgs) ToJobReferenceOutput() JobReferenceOutput

func (JobReferenceArgs) ToJobReferenceOutputWithContext

func (i JobReferenceArgs) ToJobReferenceOutputWithContext(ctx context.Context) JobReferenceOutput

func (JobReferenceArgs) ToJobReferencePtrOutput

func (i JobReferenceArgs) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferenceArgs) ToJobReferencePtrOutputWithContext

func (i JobReferenceArgs) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferenceInput

type JobReferenceInput interface {
	pulumi.Input

	ToJobReferenceOutput() JobReferenceOutput
	ToJobReferenceOutputWithContext(context.Context) JobReferenceOutput
}

type JobReferenceOutput

type JobReferenceOutput struct{ *pulumi.OutputState }

func (JobReferenceOutput) ElementType

func (JobReferenceOutput) ElementType() reflect.Type

func (JobReferenceOutput) JobId

func (JobReferenceOutput) ToJobReferenceOutput

func (o JobReferenceOutput) ToJobReferenceOutput() JobReferenceOutput

func (JobReferenceOutput) ToJobReferenceOutputWithContext

func (o JobReferenceOutput) ToJobReferenceOutputWithContext(ctx context.Context) JobReferenceOutput

func (JobReferenceOutput) ToJobReferencePtrOutput

func (o JobReferenceOutput) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferenceOutput) ToJobReferencePtrOutputWithContext

func (o JobReferenceOutput) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferencePtrInput

type JobReferencePtrInput interface {
	pulumi.Input

	ToJobReferencePtrOutput() JobReferencePtrOutput
	ToJobReferencePtrOutputWithContext(context.Context) JobReferencePtrOutput
}

type JobReferencePtrOutput

type JobReferencePtrOutput struct{ *pulumi.OutputState }

func (JobReferencePtrOutput) Elem

func (JobReferencePtrOutput) ElementType

func (JobReferencePtrOutput) ElementType() reflect.Type

func (JobReferencePtrOutput) JobId

func (JobReferencePtrOutput) ToJobReferencePtrOutput

func (o JobReferencePtrOutput) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferencePtrOutput) ToJobReferencePtrOutputWithContext

func (o JobReferencePtrOutput) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobScheduling

type JobScheduling struct {
	MaxFailuresPerHour int `pulumi:"maxFailuresPerHour"`
}

type JobSchedulingArgs

type JobSchedulingArgs struct {
	MaxFailuresPerHour pulumi.IntInput `pulumi:"maxFailuresPerHour"`
}
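
Scheduling exposes a single knob. A fragment, under the same assumptions as the earlier sketch, that makes the job restartable up to a bounded number of driver failures per hour:

// Allow the driver to be restarted a few times before the job is failed.
// The limit of 5 is an arbitrary illustration, not a package default.
scheduling := &dataproc.JobSchedulingArgs{
	MaxFailuresPerHour: pulumi.Int(5),
}
_ = scheduling // passed as the Scheduling field of the Job arguments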

func (JobSchedulingArgs) ElementType

func (JobSchedulingArgs) ElementType() reflect.Type

func (JobSchedulingArgs) ToJobSchedulingOutput

func (i JobSchedulingArgs) ToJobSchedulingOutput() JobSchedulingOutput

func (JobSchedulingArgs) ToJobSchedulingOutputWithContext

func (i JobSchedulingArgs) ToJobSchedulingOutputWithContext(ctx context.Context) JobSchedulingOutput

func (JobSchedulingArgs) ToJobSchedulingPtrOutput

func (i JobSchedulingArgs) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingArgs) ToJobSchedulingPtrOutputWithContext

func (i JobSchedulingArgs) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingInput

type JobSchedulingInput interface {
	pulumi.Input

	ToJobSchedulingOutput() JobSchedulingOutput
	ToJobSchedulingOutputWithContext(context.Context) JobSchedulingOutput
}

type JobSchedulingOutput

type JobSchedulingOutput struct{ *pulumi.OutputState }

func (JobSchedulingOutput) ElementType

func (JobSchedulingOutput) ElementType() reflect.Type

func (JobSchedulingOutput) MaxFailuresPerHour

func (o JobSchedulingOutput) MaxFailuresPerHour() pulumi.IntOutput

func (JobSchedulingOutput) ToJobSchedulingOutput

func (o JobSchedulingOutput) ToJobSchedulingOutput() JobSchedulingOutput

func (JobSchedulingOutput) ToJobSchedulingOutputWithContext

func (o JobSchedulingOutput) ToJobSchedulingOutputWithContext(ctx context.Context) JobSchedulingOutput

func (JobSchedulingOutput) ToJobSchedulingPtrOutput

func (o JobSchedulingOutput) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingOutput) ToJobSchedulingPtrOutputWithContext

func (o JobSchedulingOutput) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingPtrInput

type JobSchedulingPtrInput interface {
	pulumi.Input

	ToJobSchedulingPtrOutput() JobSchedulingPtrOutput
	ToJobSchedulingPtrOutputWithContext(context.Context) JobSchedulingPtrOutput
}

type JobSchedulingPtrOutput

type JobSchedulingPtrOutput struct{ *pulumi.OutputState }

func (JobSchedulingPtrOutput) Elem

func (JobSchedulingPtrOutput) ElementType

func (JobSchedulingPtrOutput) ElementType() reflect.Type

func (JobSchedulingPtrOutput) MaxFailuresPerHour

func (o JobSchedulingPtrOutput) MaxFailuresPerHour() pulumi.IntOutput

func (JobSchedulingPtrOutput) ToJobSchedulingPtrOutput

func (o JobSchedulingPtrOutput) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingPtrOutput) ToJobSchedulingPtrOutputWithContext

func (o JobSchedulingPtrOutput) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSparkConfig

type JobSparkConfig struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                     `pulumi:"jarFileUris"`
	LoggingConfig *JobSparkConfigLoggingConfig `pulumi:"loggingConfig"`
	MainClass     *string                      `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
}

type JobSparkConfigArgs

type JobSparkConfigArgs struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput             `pulumi:"jarFileUris"`
	LoggingConfig JobSparkConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	MainClass     pulumi.StringPtrInput               `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}
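
As the comments above note, mainClass and mainJarFileUri conflict, so a Spark job sets one or the other. A fragment under the same assumptions as the earlier sketch, using the stock SparkPi example class (the local jar path is an assumption about the cluster image, not something this package guarantees):

sparkConfig := &dataproc.JobSparkConfigArgs{
	MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
	JarFileUris: pulumi.StringArray{
		pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
	},
	Args: pulumi.StringArray{pulumi.String("1000")},
	LoggingConfig: &dataproc.JobSparkConfigLoggingConfigArgs{
		DriverLogLevels: pulumi.StringMap{"root": pulumi.String("INFO")},
	},
}
_ = sparkConfig // passed as the SparkConfig field of the Job arguments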

func (JobSparkConfigArgs) ElementType

func (JobSparkConfigArgs) ElementType() reflect.Type

func (JobSparkConfigArgs) ToJobSparkConfigOutput

func (i JobSparkConfigArgs) ToJobSparkConfigOutput() JobSparkConfigOutput

func (JobSparkConfigArgs) ToJobSparkConfigOutputWithContext

func (i JobSparkConfigArgs) ToJobSparkConfigOutputWithContext(ctx context.Context) JobSparkConfigOutput

func (JobSparkConfigArgs) ToJobSparkConfigPtrOutput

func (i JobSparkConfigArgs) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigArgs) ToJobSparkConfigPtrOutputWithContext

func (i JobSparkConfigArgs) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparkConfigInput

type JobSparkConfigInput interface {
	pulumi.Input

	ToJobSparkConfigOutput() JobSparkConfigOutput
	ToJobSparkConfigOutputWithContext(context.Context) JobSparkConfigOutput
}

type JobSparkConfigLoggingConfig

type JobSparkConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobSparkConfigLoggingConfigArgs

type JobSparkConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobSparkConfigLoggingConfigArgs) ElementType

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutput

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutputWithContext

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutput

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigInput

type JobSparkConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput
	ToJobSparkConfigLoggingConfigOutputWithContext(context.Context) JobSparkConfigLoggingConfigOutput
}

type JobSparkConfigLoggingConfigOutput

type JobSparkConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobSparkConfigLoggingConfigOutput) DriverLogLevels

func (JobSparkConfigLoggingConfigOutput) ElementType

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutput

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutputWithContext

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutput

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigPtrInput

type JobSparkConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput
	ToJobSparkConfigLoggingConfigPtrOutputWithContext(context.Context) JobSparkConfigLoggingConfigPtrOutput
}

type JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparkConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobSparkConfigLoggingConfigPtrOutput) Elem

func (JobSparkConfigLoggingConfigPtrOutput) ElementType

func (JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutput

func (o JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (o JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigOutput

type JobSparkConfigOutput struct{ *pulumi.OutputState }

func (JobSparkConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobSparkConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobSparkConfigOutput) ElementType

func (JobSparkConfigOutput) ElementType() reflect.Type

func (JobSparkConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobSparkConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparkConfigOutput) LoggingConfig

func (JobSparkConfigOutput) MainClass

func (JobSparkConfigOutput) MainJarFileUri

func (o JobSparkConfigOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobSparkConfigOutput) Properties

A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparkConfigOutput) ToJobSparkConfigOutput

func (o JobSparkConfigOutput) ToJobSparkConfigOutput() JobSparkConfigOutput

func (JobSparkConfigOutput) ToJobSparkConfigOutputWithContext

func (o JobSparkConfigOutput) ToJobSparkConfigOutputWithContext(ctx context.Context) JobSparkConfigOutput

func (JobSparkConfigOutput) ToJobSparkConfigPtrOutput

func (o JobSparkConfigOutput) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigOutput) ToJobSparkConfigPtrOutputWithContext

func (o JobSparkConfigOutput) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparkConfigPtrInput

type JobSparkConfigPtrInput interface {
	pulumi.Input

	ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput
	ToJobSparkConfigPtrOutputWithContext(context.Context) JobSparkConfigPtrOutput
}

type JobSparkConfigPtrOutput

type JobSparkConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparkConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobSparkConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobSparkConfigPtrOutput) Elem

func (JobSparkConfigPtrOutput) ElementType

func (JobSparkConfigPtrOutput) ElementType() reflect.Type

func (JobSparkConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobSparkConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparkConfigPtrOutput) LoggingConfig

func (JobSparkConfigPtrOutput) MainClass

func (JobSparkConfigPtrOutput) MainJarFileUri

func (o JobSparkConfigPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobSparkConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutput

func (o JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutputWithContext

func (o JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparksqlConfig

type JobSparksqlConfig struct {
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                        `pulumi:"jarFileUris"`
	LoggingConfig *JobSparksqlConfigLoggingConfig `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryList`
	QueryFileUri *string  `pulumi:"queryFileUri"`
	QueryLists   []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

type JobSparksqlConfigArgs

type JobSparksqlConfigArgs struct {
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput                `pulumi:"jarFileUris"`
	LoggingConfig JobSparksqlConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryList`
	QueryFileUri pulumi.StringPtrInput   `pulumi:"queryFileUri"`
	QueryLists   pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}
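
queryFileUri and the inline query list conflict, so a Spark SQL job supplies one of the two. A fragment under the same assumptions as the earlier sketch, with scriptVariables substituting into the inline queries:

sparksqlConfig := &dataproc.JobSparksqlConfigArgs{
	QueryLists: pulumi.StringArray{
		pulumi.String("DROP TABLE IF EXISTS example_table"),
		pulumi.String("CREATE TABLE example_table (bar INT)"),
		pulumi.String("SELECT * FROM example_table WHERE bar = ${VAR}"),
	},
	// Equivalent to running `SET VAR=1;` before the queries.
	ScriptVariables: pulumi.StringMap{
		"VAR": pulumi.String("1"),
	},
}
_ = sparksqlConfig // passed as the SparksqlConfig field of the Job arguments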

func (JobSparksqlConfigArgs) ElementType

func (JobSparksqlConfigArgs) ElementType() reflect.Type

func (JobSparksqlConfigArgs) ToJobSparksqlConfigOutput

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigOutput() JobSparksqlConfigOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigOutputWithContext

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigOutputWithContext(ctx context.Context) JobSparksqlConfigOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutput

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutputWithContext

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobSparksqlConfigInput

type JobSparksqlConfigInput interface {
	pulumi.Input

	ToJobSparksqlConfigOutput() JobSparksqlConfigOutput
	ToJobSparksqlConfigOutputWithContext(context.Context) JobSparksqlConfigOutput
}

type JobSparksqlConfigLoggingConfig

type JobSparksqlConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobSparksqlConfigLoggingConfigArgs

type JobSparksqlConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobSparksqlConfigLoggingConfigArgs) ElementType

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutput

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutputWithContext

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutput

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigInput

type JobSparksqlConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput
	ToJobSparksqlConfigLoggingConfigOutputWithContext(context.Context) JobSparksqlConfigLoggingConfigOutput
}

type JobSparksqlConfigLoggingConfigOutput

type JobSparksqlConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigLoggingConfigOutput) DriverLogLevels

func (JobSparksqlConfigLoggingConfigOutput) ElementType

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutput

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutputWithContext

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutput

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigPtrInput

type JobSparksqlConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput
	ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(context.Context) JobSparksqlConfigLoggingConfigPtrOutput
}

type JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobSparksqlConfigLoggingConfigPtrOutput) Elem

func (JobSparksqlConfigLoggingConfigPtrOutput) ElementType

func (JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutput

func (o JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (o JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigOutput

type JobSparksqlConfigOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigOutput) ElementType

func (JobSparksqlConfigOutput) ElementType() reflect.Type

func (JobSparksqlConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparksqlConfigOutput) LoggingConfig

func (JobSparksqlConfigOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparksqlConfigOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryList`

func (JobSparksqlConfigOutput) QueryLists

func (JobSparksqlConfigOutput) ScriptVariables

func (o JobSparksqlConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobSparksqlConfigOutput) ToJobSparksqlConfigOutput

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigOutput() JobSparksqlConfigOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigOutputWithContext

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigOutputWithContext(ctx context.Context) JobSparksqlConfigOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutput

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutputWithContext

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobSparksqlConfigPtrInput

type JobSparksqlConfigPtrInput interface {
	pulumi.Input

	ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput
	ToJobSparksqlConfigPtrOutputWithContext(context.Context) JobSparksqlConfigPtrOutput
}

type JobSparksqlConfigPtrOutput

type JobSparksqlConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigPtrOutput) Elem

func (JobSparksqlConfigPtrOutput) ElementType

func (JobSparksqlConfigPtrOutput) ElementType() reflect.Type

func (JobSparksqlConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparksqlConfigPtrOutput) LoggingConfig

func (JobSparksqlConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparksqlConfigPtrOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryList`

func (JobSparksqlConfigPtrOutput) QueryLists

func (JobSparksqlConfigPtrOutput) ScriptVariables

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutput

func (o JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutputWithContext

func (o JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobState

type JobState struct {
	// If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
	DriverControlsFilesUri pulumi.StringPtrInput
	// A URI pointing to the location of the stdout of the job's driver program.
	DriverOutputResourceUri pulumi.StringPtrInput
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete  pulumi.BoolPtrInput
	HadoopConfig JobHadoopConfigPtrInput
	HiveConfig   JobHiveConfigPtrInput
	// The list of labels (key/value pairs) to add to the job.
	Labels    pulumi.StringMapInput
	PigConfig JobPigConfigPtrInput
	Placement JobPlacementPtrInput
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project       pulumi.StringPtrInput
	PysparkConfig JobPysparkConfigPtrInput
	Reference     JobReferencePtrInput
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrInput
	// Optional. Job scheduling configuration.
	Scheduling     JobSchedulingPtrInput
	SparkConfig    JobSparkConfigPtrInput
	SparksqlConfig JobSparksqlConfigPtrInput
	Status         JobStatusPtrInput
}
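
JobState is the state bag used when adopting an existing job rather than creating one. A hedged fragment, assuming the package's GetJob follows the usual Get<Resource> signature and that the ID below exists (it is purely illustrative):

// Look up an already-submitted job; passing nil state lets the provider
// populate every JobState field from the live resource.
existing, err := dataproc.GetJob(ctx, "imported-job", pulumi.ID("my-existing-job-id"), nil)
if err != nil {
	return err
}
_ = existing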

func (JobState) ElementType

func (JobState) ElementType() reflect.Type

type JobStatus

type JobStatus struct {
	Details        *string `pulumi:"details"`
	State          *string `pulumi:"state"`
	StateStartTime *string `pulumi:"stateStartTime"`
	Substate       *string `pulumi:"substate"`
}

type JobStatusArgs

type JobStatusArgs struct {
	Details        pulumi.StringPtrInput `pulumi:"details"`
	State          pulumi.StringPtrInput `pulumi:"state"`
	StateStartTime pulumi.StringPtrInput `pulumi:"stateStartTime"`
	Substate       pulumi.StringPtrInput `pulumi:"substate"`
}

func (JobStatusArgs) ElementType

func (JobStatusArgs) ElementType() reflect.Type

func (JobStatusArgs) ToJobStatusOutput

func (i JobStatusArgs) ToJobStatusOutput() JobStatusOutput

func (JobStatusArgs) ToJobStatusOutputWithContext

func (i JobStatusArgs) ToJobStatusOutputWithContext(ctx context.Context) JobStatusOutput

func (JobStatusArgs) ToJobStatusPtrOutput

func (i JobStatusArgs) ToJobStatusPtrOutput() JobStatusPtrOutput

func (JobStatusArgs) ToJobStatusPtrOutputWithContext

func (i JobStatusArgs) ToJobStatusPtrOutputWithContext(ctx context.Context) JobStatusPtrOutput

type JobStatusInput

type JobStatusInput interface {
	pulumi.Input

	ToJobStatusOutput() JobStatusOutput
	ToJobStatusOutputWithContext(context.Context) JobStatusOutput
}

type JobStatusOutput

type JobStatusOutput struct{ *pulumi.OutputState }

func (JobStatusOutput) Details

func (JobStatusOutput) ElementType

func (JobStatusOutput) ElementType() reflect.Type

func (JobStatusOutput) State

func (JobStatusOutput) StateStartTime

func (o JobStatusOutput) StateStartTime() pulumi.StringPtrOutput

func (JobStatusOutput) Substate

func (o JobStatusOutput) Substate() pulumi.StringPtrOutput

func (JobStatusOutput) ToJobStatusOutput

func (o JobStatusOutput) ToJobStatusOutput() JobStatusOutput

func (JobStatusOutput) ToJobStatusOutputWithContext

func (o JobStatusOutput) ToJobStatusOutputWithContext(ctx context.Context) JobStatusOutput

func (JobStatusOutput) ToJobStatusPtrOutput

func (o JobStatusOutput) ToJobStatusPtrOutput() JobStatusPtrOutput

func (JobStatusOutput) ToJobStatusPtrOutputWithContext

func (o JobStatusOutput) ToJobStatusPtrOutputWithContext(ctx context.Context) JobStatusPtrOutput

type JobStatusPtrInput

type JobStatusPtrInput interface {
	pulumi.Input

	ToJobStatusPtrOutput() JobStatusPtrOutput
	ToJobStatusPtrOutputWithContext(context.Context) JobStatusPtrOutput
}

func JobStatusPtr

func JobStatusPtr(v *JobStatusArgs) JobStatusPtrInput

type JobStatusPtrOutput

type JobStatusPtrOutput struct{ *pulumi.OutputState }

func (JobStatusPtrOutput) Details

func (JobStatusPtrOutput) Elem

func (JobStatusPtrOutput) ElementType

func (JobStatusPtrOutput) ElementType() reflect.Type

func (JobStatusPtrOutput) State

func (JobStatusPtrOutput) StateStartTime

func (o JobStatusPtrOutput) StateStartTime() pulumi.StringPtrOutput

func (JobStatusPtrOutput) Substate

func (JobStatusPtrOutput) ToJobStatusPtrOutput

func (o JobStatusPtrOutput) ToJobStatusPtrOutput() JobStatusPtrOutput

func (JobStatusPtrOutput) ToJobStatusPtrOutputWithContext

func (o JobStatusPtrOutput) ToJobStatusPtrOutputWithContext(ctx context.Context) JobStatusPtrOutput
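
Status fields are read back through the accessors listed above. Assuming the Job resource exposes its status as a JobStatusPtrOutput (mirroring the Status field of JobState), a fragment from the same assumed program:

// job is assumed to be the *dataproc.Job returned by NewJob in the earlier sketch.
ctx.Export("jobState", job.Status.State())
ctx.Export("jobStateStartTime", job.Status.StateStartTime())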
