dataproc

package
v4.21.0

Published: Apr 13, 2021 License: Apache-2.0 Imports: 7 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AutoscalingPolicy

type AutoscalingPolicy struct {
	pulumi.CustomResourceState

	// Basic algorithm for autoscaling.
	// Structure is documented below.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrOutput `pulumi:"basicAlgorithm"`
	// The location where the autoscaling policy should reside.
	// The default value is `global`.
	Location pulumi.StringPtrOutput `pulumi:"location"`
	// The "resource name" of the autoscaling policy.
	Name pulumi.StringOutput `pulumi:"name"`
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 50 characters.
	PolicyId pulumi.StringOutput `pulumi:"policyId"`
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// Describes how the autoscaler will operate for secondary workers.
	// Structure is documented below.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrOutput `pulumi:"secondaryWorkerConfig"`
	// Describes how the autoscaler will operate for primary workers.
	// Structure is documented below.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrOutput `pulumi:"workerConfig"`
}

Describes an autoscaling policy for Dataproc cluster autoscaler.

## Example Usage

### Dataproc Autoscaling Policy

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		asp, err := dataproc.NewAutoscalingPolicy(ctx, "asp", &dataproc.AutoscalingPolicyArgs{
			PolicyId: pulumi.String("dataproc-policy"),
			Location: pulumi.String("us-central1"),
			WorkerConfig: &dataproc.AutoscalingPolicyWorkerConfigArgs{
				MaxInstances: pulumi.Int(3),
			},
			BasicAlgorithm: &dataproc.AutoscalingPolicyBasicAlgorithmArgs{
				YarnConfig: &dataproc.AutoscalingPolicyBasicAlgorithmYarnConfigArgs{
					GracefulDecommissionTimeout: pulumi.String("30s"),
					ScaleUpFactor:               pulumi.Float64(0.5),
					ScaleDownFactor:             pulumi.Float64(0.5),
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewCluster(ctx, "basic", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
					PolicyUri: asp.Name,
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

AutoscalingPolicy can be imported using any of these accepted formats:

```sh

$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}

```

```sh

$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default {{project}}/{{location}}/{{policy_id}}

```

```sh

$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default {{location}}/{{policy_id}}

```

func GetAutoscalingPolicy

func GetAutoscalingPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *AutoscalingPolicyState, opts ...pulumi.ResourceOption) (*AutoscalingPolicy, error)

GetAutoscalingPolicy gets an existing AutoscalingPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
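
A minimal sketch of such a lookup, assuming it runs inside a `pulumi.Run` callback; the project, location, and policy ID in the resource path are hypothetical placeholders:

```go
// Look up an existing policy's state by its full resource ID (placeholder values).
existing, err := dataproc.GetAutoscalingPolicy(ctx, "existing-asp",
	pulumi.ID("projects/my-project/locations/us-central1/autoscalingPolicies/dataproc-policy"),
	nil)
if err != nil {
	return err
}
ctx.Export("existingPolicyName", existing.Name)
```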

func NewAutoscalingPolicy

func NewAutoscalingPolicy(ctx *pulumi.Context,
	name string, args *AutoscalingPolicyArgs, opts ...pulumi.ResourceOption) (*AutoscalingPolicy, error)

NewAutoscalingPolicy registers a new resource with the given unique name, arguments, and options.

func (*AutoscalingPolicy) ElementType added in v4.4.0

func (*AutoscalingPolicy) ElementType() reflect.Type

func (*AutoscalingPolicy) ToAutoscalingPolicyOutput added in v4.4.0

func (i *AutoscalingPolicy) ToAutoscalingPolicyOutput() AutoscalingPolicyOutput

func (*AutoscalingPolicy) ToAutoscalingPolicyOutputWithContext added in v4.4.0

func (i *AutoscalingPolicy) ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput

func (*AutoscalingPolicy) ToAutoscalingPolicyPtrOutput added in v4.11.1

func (i *AutoscalingPolicy) ToAutoscalingPolicyPtrOutput() AutoscalingPolicyPtrOutput

func (*AutoscalingPolicy) ToAutoscalingPolicyPtrOutputWithContext added in v4.11.1

func (i *AutoscalingPolicy) ToAutoscalingPolicyPtrOutputWithContext(ctx context.Context) AutoscalingPolicyPtrOutput

type AutoscalingPolicyArgs

type AutoscalingPolicyArgs struct {
	// Basic algorithm for autoscaling.
	// Structure is documented below.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrInput
	// The location where the autoscaling policy should reside.
	// The default value is `global`.
	Location pulumi.StringPtrInput
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 50 characters.
	PolicyId pulumi.StringInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// Describes how the autoscaler will operate for secondary workers.
	// Structure is documented below.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrInput
	// Describes how the autoscaler will operate for primary workers.
	// Structure is documented below.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrInput
}

The set of arguments for constructing a AutoscalingPolicy resource.

func (AutoscalingPolicyArgs) ElementType

func (AutoscalingPolicyArgs) ElementType() reflect.Type

type AutoscalingPolicyArray added in v4.11.1

type AutoscalingPolicyArray []AutoscalingPolicyInput

func (AutoscalingPolicyArray) ElementType added in v4.11.1

func (AutoscalingPolicyArray) ElementType() reflect.Type

func (AutoscalingPolicyArray) ToAutoscalingPolicyArrayOutput added in v4.11.1

func (i AutoscalingPolicyArray) ToAutoscalingPolicyArrayOutput() AutoscalingPolicyArrayOutput

func (AutoscalingPolicyArray) ToAutoscalingPolicyArrayOutputWithContext added in v4.11.1

func (i AutoscalingPolicyArray) ToAutoscalingPolicyArrayOutputWithContext(ctx context.Context) AutoscalingPolicyArrayOutput

type AutoscalingPolicyArrayInput added in v4.11.1

type AutoscalingPolicyArrayInput interface {
	pulumi.Input

	ToAutoscalingPolicyArrayOutput() AutoscalingPolicyArrayOutput
	ToAutoscalingPolicyArrayOutputWithContext(context.Context) AutoscalingPolicyArrayOutput
}

AutoscalingPolicyArrayInput is an input type that accepts AutoscalingPolicyArray and AutoscalingPolicyArrayOutput values. You can construct a concrete instance of `AutoscalingPolicyArrayInput` via:

AutoscalingPolicyArray{ AutoscalingPolicyArgs{...} }

type AutoscalingPolicyArrayOutput added in v4.11.1

type AutoscalingPolicyArrayOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyArrayOutput) ElementType added in v4.11.1

func (AutoscalingPolicyArrayOutput) Index added in v4.11.1

func (AutoscalingPolicyArrayOutput) ToAutoscalingPolicyArrayOutput added in v4.11.1

func (o AutoscalingPolicyArrayOutput) ToAutoscalingPolicyArrayOutput() AutoscalingPolicyArrayOutput

func (AutoscalingPolicyArrayOutput) ToAutoscalingPolicyArrayOutputWithContext added in v4.11.1

func (o AutoscalingPolicyArrayOutput) ToAutoscalingPolicyArrayOutputWithContext(ctx context.Context) AutoscalingPolicyArrayOutput

type AutoscalingPolicyBasicAlgorithm

type AutoscalingPolicyBasicAlgorithm struct {
	// Duration between scaling events. A scaling period starts after the
	// update operation from the previous event has completed.
	// Bounds: [2m, 1d]. Default: 2m.
	CooldownPeriod *string `pulumi:"cooldownPeriod"`
	// YARN autoscaling configuration.
	// Structure is documented below.
	YarnConfig AutoscalingPolicyBasicAlgorithmYarnConfig `pulumi:"yarnConfig"`
}

type AutoscalingPolicyBasicAlgorithmArgs

type AutoscalingPolicyBasicAlgorithmArgs struct {
	// Duration between scaling events. A scaling period starts after the
	// update operation from the previous event has completed.
	// Bounds: [2m, 1d]. Default: 2m.
	CooldownPeriod pulumi.StringPtrInput `pulumi:"cooldownPeriod"`
	// YARN autoscaling configuration.
	// Structure is documented below.
	YarnConfig AutoscalingPolicyBasicAlgorithmYarnConfigInput `pulumi:"yarnConfig"`
}

func (AutoscalingPolicyBasicAlgorithmArgs) ElementType

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutput

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

type AutoscalingPolicyBasicAlgorithmInput

type AutoscalingPolicyBasicAlgorithmInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput
	ToAutoscalingPolicyBasicAlgorithmOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmOutput
}

AutoscalingPolicyBasicAlgorithmInput is an input type that accepts AutoscalingPolicyBasicAlgorithmArgs and AutoscalingPolicyBasicAlgorithmOutput values. You can construct a concrete instance of `AutoscalingPolicyBasicAlgorithmInput` via:

AutoscalingPolicyBasicAlgorithmArgs{...}

type AutoscalingPolicyBasicAlgorithmOutput

type AutoscalingPolicyBasicAlgorithmOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmOutput) CooldownPeriod

Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.

func (AutoscalingPolicyBasicAlgorithmOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutput

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmOutput) YarnConfig

YARN autoscaling configuration. Structure is documented below.

type AutoscalingPolicyBasicAlgorithmPtrInput

type AutoscalingPolicyBasicAlgorithmPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput
	ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput
}

AutoscalingPolicyBasicAlgorithmPtrInput is an input type that accepts AutoscalingPolicyBasicAlgorithmArgs, AutoscalingPolicyBasicAlgorithmPtr and AutoscalingPolicyBasicAlgorithmPtrOutput values. You can construct a concrete instance of `AutoscalingPolicyBasicAlgorithmPtrInput` via:

        AutoscalingPolicyBasicAlgorithmArgs{...}

or:

        nil

type AutoscalingPolicyBasicAlgorithmPtrOutput

type AutoscalingPolicyBasicAlgorithmPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmPtrOutput) CooldownPeriod

Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.

func (AutoscalingPolicyBasicAlgorithmPtrOutput) Elem

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (o AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmPtrOutput) YarnConfig

YARN autoscaling configuration. Structure is documented below.

type AutoscalingPolicyBasicAlgorithmYarnConfig

type AutoscalingPolicyBasicAlgorithmYarnConfig struct {
	// Timeout for YARN graceful decommissioning of Node Managers. Specifies the
	// duration to wait for jobs to complete before forcefully removing workers
	// (and potentially interrupting jobs). Only applicable to downscaling operations.
	// Bounds: [0s, 1d].
	GracefulDecommissionTimeout string `pulumi:"gracefulDecommissionTimeout"`
	// Fraction of average pending memory in the last cooldown period for which to
	// remove workers. A scale-down factor of 1 will result in scaling down so that there
	// is no available memory remaining after the update (more aggressive scaling).
	// A scale-down factor of 0 disables removing workers, which can be beneficial for
	// autoscaling a single job.
	// Bounds: [0.0, 1.0].
	ScaleDownFactor float64 `pulumi:"scaleDownFactor"`
	// Minimum scale-down threshold as a fraction of total cluster size before scaling occurs.
	// For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must
	// recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0
	// means the autoscaler will scale down on any recommended change.
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleDownMinWorkerFraction *float64 `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of average pending memory in the last cooldown period for which to
	// add workers. A scale-up factor of 1.0 will result in scaling up so that there
	// is no pending memory remaining after the update (more aggressive scaling).
	// A scale-up factor closer to 0 will result in a smaller magnitude of scaling up
	// (less aggressive scaling).
	// Bounds: [0.0, 1.0].
	ScaleUpFactor float64 `pulumi:"scaleUpFactor"`
	// Minimum scale-up threshold as a fraction of total cluster size before scaling
	// occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler
	// must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of
	// 0 means the autoscaler will scale up on any recommended change.
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleUpMinWorkerFraction *float64 `pulumi:"scaleUpMinWorkerFraction"`
}

type AutoscalingPolicyBasicAlgorithmYarnConfigArgs

type AutoscalingPolicyBasicAlgorithmYarnConfigArgs struct {
	// Timeout for YARN graceful decommissioning of Node Managers. Specifies the
	// duration to wait for jobs to complete before forcefully removing workers
	// (and potentially interrupting jobs). Only applicable to downscaling operations.
	// Bounds: [0s, 1d].
	GracefulDecommissionTimeout pulumi.StringInput `pulumi:"gracefulDecommissionTimeout"`
	// Fraction of average pending memory in the last cooldown period for which to
	// remove workers. A scale-down factor of 1 will result in scaling down so that there
	// is no available memory remaining after the update (more aggressive scaling).
	// A scale-down factor of 0 disables removing workers, which can be beneficial for
	// autoscaling a single job.
	// Bounds: [0.0, 1.0].
	ScaleDownFactor pulumi.Float64Input `pulumi:"scaleDownFactor"`
	// Minimum scale-down threshold as a fraction of total cluster size before scaling occurs.
	// For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must
	// recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0
	// means the autoscaler will scale down on any recommended change.
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleDownMinWorkerFraction pulumi.Float64PtrInput `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of average pending memory in the last cooldown period for which to
	// add workers. A scale-up factor of 1.0 will result in scaling up so that there
	// is no pending memory remaining after the update (more aggressive scaling).
	// A scale-up factor closer to 0 will result in a smaller magnitude of scaling up
	// (less aggressive scaling).
	// Bounds: [0.0, 1.0].
	ScaleUpFactor pulumi.Float64Input `pulumi:"scaleUpFactor"`
	// Minimum scale-up threshold as a fraction of total cluster size before scaling
	// occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler
	// must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of
	// 0 means the autoscaler will scale up on any recommended change.
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleUpMinWorkerFraction pulumi.Float64PtrInput `pulumi:"scaleUpMinWorkerFraction"`
}
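
For orientation, a hedged sketch that fills in every field above plus the algorithm-level cooldown period; the values are arbitrary illustrations, not recommendations, and the variable is assumed to be passed later as the `BasicAlgorithm` field of `AutoscalingPolicyArgs`:

```go
basicAlgorithm := &dataproc.AutoscalingPolicyBasicAlgorithmArgs{
	// Wait two minutes between scaling events (the documented default).
	CooldownPeriod: pulumi.String("120s"),
	YarnConfig: &dataproc.AutoscalingPolicyBasicAlgorithmYarnConfigArgs{
		// Give jobs up to five minutes to finish before workers are removed.
		GracefulDecommissionTimeout: pulumi.String("300s"),
		// Add workers for half of the pending memory, and remove workers for all
		// of the available memory observed in the last cooldown period.
		ScaleUpFactor:   pulumi.Float64(0.5),
		ScaleDownFactor: pulumi.Float64(1.0),
		// Ignore scaling recommendations smaller than 10% of the cluster size.
		ScaleUpMinWorkerFraction:   pulumi.Float64(0.1),
		ScaleDownMinWorkerFraction: pulumi.Float64(0.1),
	},
}
```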

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ElementType

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput() AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigInput

type AutoscalingPolicyBasicAlgorithmYarnConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput
	ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput
}

AutoscalingPolicyBasicAlgorithmYarnConfigInput is an input type that accepts AutoscalingPolicyBasicAlgorithmYarnConfigArgs and AutoscalingPolicyBasicAlgorithmYarnConfigOutput values. You can construct a concrete instance of `AutoscalingPolicyBasicAlgorithmYarnConfigInput` via:

AutoscalingPolicyBasicAlgorithmYarnConfigArgs{...}

type AutoscalingPolicyBasicAlgorithmYarnConfigOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) GracefulDecommissionTimeout

Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleDownFactor

Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleDownMinWorkerFraction

Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleUpFactor

Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleUpMinWorkerFraction

Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput() AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigPtrInput

type AutoscalingPolicyBasicAlgorithmYarnConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput() AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput
	ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput
}

AutoscalingPolicyBasicAlgorithmYarnConfigPtrInput is an input type that accepts AutoscalingPolicyBasicAlgorithmYarnConfigArgs, AutoscalingPolicyBasicAlgorithmYarnConfigPtr and AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput values. You can construct a concrete instance of `AutoscalingPolicyBasicAlgorithmYarnConfigPtrInput` via:

        AutoscalingPolicyBasicAlgorithmYarnConfigArgs{...}

or:

        nil

type AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) Elem

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) GracefulDecommissionTimeout

Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ScaleDownFactor

Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ScaleDownMinWorkerFraction

Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ScaleUpFactor

Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ScaleUpMinWorkerFraction

Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (o AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput() AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

type AutoscalingPolicyInput added in v4.4.0

type AutoscalingPolicyInput interface {
	pulumi.Input

	ToAutoscalingPolicyOutput() AutoscalingPolicyOutput
	ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput
}

type AutoscalingPolicyMap added in v4.11.1

type AutoscalingPolicyMap map[string]AutoscalingPolicyInput

func (AutoscalingPolicyMap) ElementType added in v4.11.1

func (AutoscalingPolicyMap) ElementType() reflect.Type

func (AutoscalingPolicyMap) ToAutoscalingPolicyMapOutput added in v4.11.1

func (i AutoscalingPolicyMap) ToAutoscalingPolicyMapOutput() AutoscalingPolicyMapOutput

func (AutoscalingPolicyMap) ToAutoscalingPolicyMapOutputWithContext added in v4.11.1

func (i AutoscalingPolicyMap) ToAutoscalingPolicyMapOutputWithContext(ctx context.Context) AutoscalingPolicyMapOutput

type AutoscalingPolicyMapInput added in v4.11.1

type AutoscalingPolicyMapInput interface {
	pulumi.Input

	ToAutoscalingPolicyMapOutput() AutoscalingPolicyMapOutput
	ToAutoscalingPolicyMapOutputWithContext(context.Context) AutoscalingPolicyMapOutput
}

AutoscalingPolicyMapInput is an input type that accepts AutoscalingPolicyMap and AutoscalingPolicyMapOutput values. You can construct a concrete instance of `AutoscalingPolicyMapInput` via:

AutoscalingPolicyMap{ "key": AutoscalingPolicyArgs{...} }

type AutoscalingPolicyMapOutput added in v4.11.1

type AutoscalingPolicyMapOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyMapOutput) ElementType added in v4.11.1

func (AutoscalingPolicyMapOutput) ElementType() reflect.Type

func (AutoscalingPolicyMapOutput) MapIndex added in v4.11.1

func (AutoscalingPolicyMapOutput) ToAutoscalingPolicyMapOutput added in v4.11.1

func (o AutoscalingPolicyMapOutput) ToAutoscalingPolicyMapOutput() AutoscalingPolicyMapOutput

func (AutoscalingPolicyMapOutput) ToAutoscalingPolicyMapOutputWithContext added in v4.11.1

func (o AutoscalingPolicyMapOutput) ToAutoscalingPolicyMapOutputWithContext(ctx context.Context) AutoscalingPolicyMapOutput

type AutoscalingPolicyOutput added in v4.4.0

type AutoscalingPolicyOutput struct {
	*pulumi.OutputState
}

func (AutoscalingPolicyOutput) ElementType added in v4.4.0

func (AutoscalingPolicyOutput) ElementType() reflect.Type

func (AutoscalingPolicyOutput) ToAutoscalingPolicyOutput added in v4.4.0

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyOutput() AutoscalingPolicyOutput

func (AutoscalingPolicyOutput) ToAutoscalingPolicyOutputWithContext added in v4.4.0

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput

func (AutoscalingPolicyOutput) ToAutoscalingPolicyPtrOutput added in v4.11.1

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyPtrOutput() AutoscalingPolicyPtrOutput

func (AutoscalingPolicyOutput) ToAutoscalingPolicyPtrOutputWithContext added in v4.11.1

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyPtrOutputWithContext(ctx context.Context) AutoscalingPolicyPtrOutput

type AutoscalingPolicyPtrInput added in v4.11.1

type AutoscalingPolicyPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyPtrOutput() AutoscalingPolicyPtrOutput
	ToAutoscalingPolicyPtrOutputWithContext(ctx context.Context) AutoscalingPolicyPtrOutput
}

type AutoscalingPolicyPtrOutput added in v4.11.1

type AutoscalingPolicyPtrOutput struct {
	*pulumi.OutputState
}

func (AutoscalingPolicyPtrOutput) ElementType added in v4.11.1

func (AutoscalingPolicyPtrOutput) ElementType() reflect.Type

func (AutoscalingPolicyPtrOutput) ToAutoscalingPolicyPtrOutput added in v4.11.1

func (o AutoscalingPolicyPtrOutput) ToAutoscalingPolicyPtrOutput() AutoscalingPolicyPtrOutput

func (AutoscalingPolicyPtrOutput) ToAutoscalingPolicyPtrOutputWithContext added in v4.11.1

func (o AutoscalingPolicyPtrOutput) ToAutoscalingPolicyPtrOutputWithContext(ctx context.Context) AutoscalingPolicyPtrOutput

type AutoscalingPolicySecondaryWorkerConfig

type AutoscalingPolicySecondaryWorkerConfig struct {
	// Maximum number of instances for this group. Note that by default, clusters will not use
	// secondary workers. Required for secondary workers if the minimum secondary instances is set.
	// Bounds: [minInstances, ). Defaults to 0.
	MaxInstances *int `pulumi:"maxInstances"`
	// Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
	MinInstances *int `pulumi:"minInstances"`
	// Weight for the instance group, which is used to determine the fraction of total workers
	// in the cluster from this instance group. For example, if primary workers have weight 2,
	// and secondary workers have weight 1, the cluster will have approximately 2 primary workers
	// for each secondary worker.
	// The cluster may not reach the specified balance if constrained by min/max bounds or other
	// autoscaling settings. For example, if maxInstances for secondary workers is 0, then only
	// primary workers will be added. The cluster can also be out of balance when created.
	// If weight is not set on any instance group, the cluster will default to equal weight for
	// all groups: the cluster will attempt to maintain an equal number of workers in each group
	// within the configured size bounds for each group. If weight is set for one group only,
	// the cluster will default to zero weight on the unset group. For example if weight is set
	// only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight *int `pulumi:"weight"`
}

type AutoscalingPolicySecondaryWorkerConfigArgs

type AutoscalingPolicySecondaryWorkerConfigArgs struct {
	// Maximum number of instances for this group. Note that by default, clusters will not use
	// secondary workers. Required for secondary workers if the minimum secondary instances is set.
	// Bounds: [minInstances, ). Defaults to 0.
	MaxInstances pulumi.IntPtrInput `pulumi:"maxInstances"`
	// Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
	MinInstances pulumi.IntPtrInput `pulumi:"minInstances"`
	// Weight for the instance group, which is used to determine the fraction of total workers
	// in the cluster from this instance group. For example, if primary workers have weight 2,
	// and secondary workers have weight 1, the cluster will have approximately 2 primary workers
	// for each secondary worker.
	// The cluster may not reach the specified balance if constrained by min/max bounds or other
	// autoscaling settings. For example, if maxInstances for secondary workers is 0, then only
	// primary workers will be added. The cluster can also be out of balance when created.
	// If weight is not set on any instance group, the cluster will default to equal weight for
	// all groups: the cluster will attempt to maintain an equal number of workers in each group
	// within the configured size bounds for each group. If weight is set for one group only,
	// the cluster will default to zero weight on the unset group. For example if weight is set
	// only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight pulumi.IntPtrInput `pulumi:"weight"`
}
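
To make the weight semantics concrete, here is a hedged sketch of the split described above (primary weight 2, secondary weight 1, so roughly two primary workers per secondary worker); the instance bounds are illustrative, and the two values are assumed to be passed as the `WorkerConfig` and `SecondaryWorkerConfig` fields of `AutoscalingPolicyArgs`:

```go
workerConfig := &dataproc.AutoscalingPolicyWorkerConfigArgs{
	MinInstances: pulumi.Int(2),
	MaxInstances: pulumi.Int(10),
	Weight:       pulumi.Int(2), // roughly two primary workers...
}
secondaryWorkerConfig := &dataproc.AutoscalingPolicySecondaryWorkerConfigArgs{
	MaxInstances: pulumi.Int(5),
	Weight:       pulumi.Int(1), // ...for each secondary worker
}
```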

func (AutoscalingPolicySecondaryWorkerConfigArgs) ElementType

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutput

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

type AutoscalingPolicySecondaryWorkerConfigInput

type AutoscalingPolicySecondaryWorkerConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput
	ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(context.Context) AutoscalingPolicySecondaryWorkerConfigOutput
}

AutoscalingPolicySecondaryWorkerConfigInput is an input type that accepts AutoscalingPolicySecondaryWorkerConfigArgs and AutoscalingPolicySecondaryWorkerConfigOutput values. You can construct a concrete instance of `AutoscalingPolicySecondaryWorkerConfigInput` via:

AutoscalingPolicySecondaryWorkerConfigArgs{...}

type AutoscalingPolicySecondaryWorkerConfigOutput

type AutoscalingPolicySecondaryWorkerConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicySecondaryWorkerConfigOutput) ElementType

func (AutoscalingPolicySecondaryWorkerConfigOutput) MaxInstances

Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.

func (AutoscalingPolicySecondaryWorkerConfigOutput) MinInstances

Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutput

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) Weight

Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type AutoscalingPolicySecondaryWorkerConfigPtrInput

type AutoscalingPolicySecondaryWorkerConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput
	ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput
}

AutoscalingPolicySecondaryWorkerConfigPtrInput is an input type that accepts AutoscalingPolicySecondaryWorkerConfigArgs, AutoscalingPolicySecondaryWorkerConfigPtr and AutoscalingPolicySecondaryWorkerConfigPtrOutput values. You can construct a concrete instance of `AutoscalingPolicySecondaryWorkerConfigPtrInput` via:

        AutoscalingPolicySecondaryWorkerConfigArgs{...}

or:

        nil

type AutoscalingPolicySecondaryWorkerConfigPtrOutput

type AutoscalingPolicySecondaryWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) Elem

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ElementType

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) MaxInstances

Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) MinInstances

Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (o AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) Weight

Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type AutoscalingPolicyState

type AutoscalingPolicyState struct {
	// Basic algorithm for autoscaling.
	// Structure is documented below.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrInput
	// The location where the autoscaling policy should reside.
	// The default value is `global`.
	Location pulumi.StringPtrInput
	// The "resource name" of the autoscaling policy.
	Name pulumi.StringPtrInput
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 50 characters.
	PolicyId pulumi.StringPtrInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// Describes how the autoscaler will operate for secondary workers.
	// Structure is documented below.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrInput
	// Describes how the autoscaler will operate for primary workers.
	// Structure is documented below.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrInput
}

func (AutoscalingPolicyState) ElementType

func (AutoscalingPolicyState) ElementType() reflect.Type

type AutoscalingPolicyWorkerConfig

type AutoscalingPolicyWorkerConfig struct {
	// Maximum number of instances for this group. Note that by default, clusters will not use
	// secondary workers. Required for secondary workers if the minimum secondary instances is set.
	// Bounds: [minInstances, ). Defaults to 0.
	MaxInstances int `pulumi:"maxInstances"`
	// Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
	MinInstances *int `pulumi:"minInstances"`
	// Weight for the instance group, which is used to determine the fraction of total workers
	// in the cluster from this instance group. For example, if primary workers have weight 2,
	// and secondary workers have weight 1, the cluster will have approximately 2 primary workers
	// for each secondary worker.
	// The cluster may not reach the specified balance if constrained by min/max bounds or other
	// autoscaling settings. For example, if maxInstances for secondary workers is 0, then only
	// primary workers will be added. The cluster can also be out of balance when created.
	// If weight is not set on any instance group, the cluster will default to equal weight for
	// all groups: the cluster will attempt to maintain an equal number of workers in each group
	// within the configured size bounds for each group. If weight is set for one group only,
	// the cluster will default to zero weight on the unset group. For example if weight is set
	// only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight *int `pulumi:"weight"`
}

type AutoscalingPolicyWorkerConfigArgs

type AutoscalingPolicyWorkerConfigArgs struct {
	// Maximum number of instances for this group. Note that by default, clusters will not use
	// secondary workers. Required for secondary workers if the minimum secondary instances is set.
	// Bounds: [minInstances, ). Defaults to 0.
	MaxInstances pulumi.IntInput `pulumi:"maxInstances"`
	// Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
	MinInstances pulumi.IntPtrInput `pulumi:"minInstances"`
	// Weight for the instance group, which is used to determine the fraction of total workers
	// in the cluster from this instance group. For example, if primary workers have weight 2,
	// and secondary workers have weight 1, the cluster will have approximately 2 primary workers
	// for each secondary worker.
	// The cluster may not reach the specified balance if constrained by min/max bounds or other
	// autoscaling settings. For example, if maxInstances for secondary workers is 0, then only
	// primary workers will be added. The cluster can also be out of balance when created.
	// If weight is not set on any instance group, the cluster will default to equal weight for
	// all groups: the cluster will attempt to maintain an equal number of workers in each group
	// within the configured size bounds for each group. If weight is set for one group only,
	// the cluster will default to zero weight on the unset group. For example if weight is set
	// only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight pulumi.IntPtrInput `pulumi:"weight"`
}

func (AutoscalingPolicyWorkerConfigArgs) ElementType

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutput

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutputWithContext

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutput

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

type AutoscalingPolicyWorkerConfigInput

type AutoscalingPolicyWorkerConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput
	ToAutoscalingPolicyWorkerConfigOutputWithContext(context.Context) AutoscalingPolicyWorkerConfigOutput
}

AutoscalingPolicyWorkerConfigInput is an input type that accepts AutoscalingPolicyWorkerConfigArgs and AutoscalingPolicyWorkerConfigOutput values. You can construct a concrete instance of `AutoscalingPolicyWorkerConfigInput` via:

AutoscalingPolicyWorkerConfigArgs{...}

type AutoscalingPolicyWorkerConfigOutput

type AutoscalingPolicyWorkerConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyWorkerConfigOutput) ElementType

func (AutoscalingPolicyWorkerConfigOutput) MaxInstances

Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.

func (AutoscalingPolicyWorkerConfigOutput) MinInstances

Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutput

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutputWithContext

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutput

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigOutput) Weight

Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type AutoscalingPolicyWorkerConfigPtrInput

type AutoscalingPolicyWorkerConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput
	ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(context.Context) AutoscalingPolicyWorkerConfigPtrOutput
}

AutoscalingPolicyWorkerConfigPtrInput is an input type that accepts AutoscalingPolicyWorkerConfigArgs, AutoscalingPolicyWorkerConfigPtr and AutoscalingPolicyWorkerConfigPtrOutput values. You can construct a concrete instance of `AutoscalingPolicyWorkerConfigPtrInput` via:

        AutoscalingPolicyWorkerConfigArgs{...}

or:

        nil

type AutoscalingPolicyWorkerConfigPtrOutput

type AutoscalingPolicyWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyWorkerConfigPtrOutput) Elem

func (AutoscalingPolicyWorkerConfigPtrOutput) ElementType

func (AutoscalingPolicyWorkerConfigPtrOutput) MaxInstances

Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.

func (AutoscalingPolicyWorkerConfigPtrOutput) MinInstances

Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.

func (AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutput

func (o AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigPtrOutput) Weight

Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type Cluster

type Cluster struct {
	pulumi.CustomResourceState

	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigOutput `pulumi:"clusterConfig"`
	// The timeout duration which allows graceful decommissioning when you change the
	// number of worker nodes directly through a `pulumi up`.
	GracefulDecommissionTimeout pulumi.StringPtrOutput `pulumi:"gracefulDecommissionTimeout"`
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringOutput `pulumi:"name"`
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrOutput `pulumi:"region"`
}

Manages a Cloud Dataproc cluster resource within GCP.

* [API documentation](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters)
* How-to Guides

!> **Warning:** Due to limitations of the API, all arguments except `labels`, `cluster_config.worker_config.num_instances`, and `cluster_config.preemptible_worker_config.num_instances` are non-updatable. Changing others will cause recreation of the whole cluster!

## Example Usage

### Basic

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "simplecluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

### Advanced

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/serviceAccount"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_default, err := serviceAccount.NewAccount(ctx, "_default", &serviceAccount.AccountArgs{
			AccountId:   pulumi.String("service-account-id"),
			DisplayName: pulumi.String("Service Account"),
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Region:                      pulumi.String("us-central1"),
			GracefulDecommissionTimeout: pulumi.String("120s"),
			Labels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				StagingBucket: pulumi.String("dataproc-staging-bucket"),
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					NumInstances: pulumi.Int(1),
					MachineType:  pulumi.String("e2-medium"),
					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
						BootDiskType:   pulumi.String("pd-ssd"),
						BootDiskSizeGb: pulumi.Int(30),
					},
				},
				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
					NumInstances:   pulumi.Int(2),
					MachineType:    pulumi.String("e2-medium"),
					MinCpuPlatform: pulumi.String("Intel Skylake"),
					DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
						BootDiskSizeGb: pulumi.Int(30),
						NumLocalSsds:   pulumi.Int(1),
					},
				},
				PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
					NumInstances: pulumi.Int(0),
				},
				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
					ImageVersion: pulumi.String("1.3.7-deb9"),
					OverrideProperties: pulumi.StringMap{
						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
					},
				},
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					Tags: pulumi.StringArray{
						pulumi.String("foo"),
						pulumi.String("bar"),
					},
					ServiceAccount: _default.Email,
					ServiceAccountScopes: pulumi.StringArray{
						pulumi.String("cloud-platform"),
					},
				},
				InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
					&dataproc.ClusterClusterConfigInitializationActionArgs{
						Script:     pulumi.String("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh"),
						TimeoutSec: pulumi.Int(500),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

### Using A GPU Accelerator

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "acceleratedCluster", &dataproc.ClusterArgs{
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					Zone: pulumi.String("us-central1-a"),
				},
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
						&dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
							AcceleratorCount: pulumi.Int(1),
							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
						},
					},
				},
			},
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

This resource does not support import.

func GetCluster

func GetCluster(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error)

GetCluster gets an existing Cluster resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
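
A minimal sketch of using `GetCluster` to look up a cluster already tracked in the stack's state; the resource name and ID below are placeholders, not values from this package.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// "my-cluster" is a placeholder ID; pass the ID of a Cluster that is
		// already part of this stack's state. A nil ClusterState means no extra
		// state properties are used to qualify the lookup.
		existing, err := dataproc.GetCluster(ctx, "existing", pulumi.ID("my-cluster"), nil)
		if err != nil {
			return err
		}
		// Re-export a property of the looked-up resource.
		ctx.Export("existingClusterName", existing.Name)
		return nil
	})
}
```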

func NewCluster

func NewCluster(ctx *pulumi.Context,
	name string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error)

NewCluster registers a new resource with the given unique name, arguments, and options.

func (*Cluster) ElementType added in v4.4.0

func (*Cluster) ElementType() reflect.Type

func (*Cluster) ToClusterOutput added in v4.4.0

func (i *Cluster) ToClusterOutput() ClusterOutput

func (*Cluster) ToClusterOutputWithContext added in v4.4.0

func (i *Cluster) ToClusterOutputWithContext(ctx context.Context) ClusterOutput

func (*Cluster) ToClusterPtrOutput added in v4.11.1

func (i *Cluster) ToClusterPtrOutput() ClusterPtrOutput

func (*Cluster) ToClusterPtrOutputWithContext added in v4.11.1

func (i *Cluster) ToClusterPtrOutputWithContext(ctx context.Context) ClusterPtrOutput

type ClusterArgs

type ClusterArgs struct {
	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigPtrInput
	// The timeout duration which allows graceful decommissioning when you change
	// the number of worker nodes directly through an update.
	GracefulDecommissionTimeout pulumi.StringPtrInput
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapInput
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringPtrInput
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a Cluster resource.

func (ClusterArgs) ElementType

func (ClusterArgs) ElementType() reflect.Type

type ClusterArray added in v4.11.1

type ClusterArray []ClusterInput

func (ClusterArray) ElementType added in v4.11.1

func (ClusterArray) ElementType() reflect.Type

func (ClusterArray) ToClusterArrayOutput added in v4.11.1

func (i ClusterArray) ToClusterArrayOutput() ClusterArrayOutput

func (ClusterArray) ToClusterArrayOutputWithContext added in v4.11.1

func (i ClusterArray) ToClusterArrayOutputWithContext(ctx context.Context) ClusterArrayOutput

type ClusterArrayInput added in v4.11.1

type ClusterArrayInput interface {
	pulumi.Input

	ToClusterArrayOutput() ClusterArrayOutput
	ToClusterArrayOutputWithContext(context.Context) ClusterArrayOutput
}

ClusterArrayInput is an input type that accepts ClusterArray and ClusterArrayOutput values. You can construct a concrete instance of `ClusterArrayInput` via:

ClusterArray{ ClusterArgs{...} }

type ClusterArrayOutput added in v4.11.1

type ClusterArrayOutput struct{ *pulumi.OutputState }

func (ClusterArrayOutput) ElementType added in v4.11.1

func (ClusterArrayOutput) ElementType() reflect.Type

func (ClusterArrayOutput) Index added in v4.11.1

func (ClusterArrayOutput) ToClusterArrayOutput added in v4.11.1

func (o ClusterArrayOutput) ToClusterArrayOutput() ClusterArrayOutput

func (ClusterArrayOutput) ToClusterArrayOutputWithContext added in v4.11.1

func (o ClusterArrayOutput) ToClusterArrayOutputWithContext(ctx context.Context) ClusterArrayOutput

type ClusterClusterConfig

type ClusterClusterConfig struct {
	// The autoscaling policy config associated with the cluster.
	// Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can
	// only be removed by setting `policyUri = ""`, rather than removing the whole block.
	// Structure defined below.
	AutoscalingConfig *ClusterClusterConfigAutoscalingConfig `pulumi:"autoscalingConfig"`
	Bucket            *string                                `pulumi:"bucket"`
	// The Customer managed encryption keys settings for the cluster.
	// Structure defined below.
	EncryptionConfig *ClusterClusterConfigEncryptionConfig `pulumi:"encryptionConfig"`
	// The config settings for port access on the cluster.
	// Structure defined below.
	// ***
	EndpointConfig *ClusterClusterConfigEndpointConfig `pulumi:"endpointConfig"`
	// Common config settings for resources of Google Compute Engine cluster
	// instances, applicable to all instances in the cluster. Structure defined below.
	GceClusterConfig *ClusterClusterConfigGceClusterConfig `pulumi:"gceClusterConfig"`
	// Commands to execute on each node after config is completed.
	// You can specify multiple of these actions. Structure defined below.
	InitializationActions []ClusterClusterConfigInitializationAction `pulumi:"initializationActions"`
	// The settings for auto deletion cluster schedule.
	// Structure defined below.
	LifecycleConfig *ClusterClusterConfigLifecycleConfig `pulumi:"lifecycleConfig"`
	// The Google Compute Engine config settings for the master instances
	// in a cluster. Structure defined below.
	MasterConfig *ClusterClusterConfigMasterConfig `pulumi:"masterConfig"`
	// The Google Compute Engine config settings for the additional (aka
	// preemptible) instances in a cluster. Structure defined below.
	PreemptibleWorkerConfig *ClusterClusterConfigPreemptibleWorkerConfig `pulumi:"preemptibleWorkerConfig"`
	// Security related configuration. Structure defined below.
	SecurityConfig *ClusterClusterConfigSecurityConfig `pulumi:"securityConfig"`
	// The config settings for software inside the cluster.
	// Structure defined below.
	SoftwareConfig *ClusterClusterConfigSoftwareConfig `pulumi:"softwareConfig"`
	// The Cloud Storage staging bucket used to stage files,
	// such as Hadoop jars, between client machines and the cluster.
	// Note: If you don't explicitly specify a `stagingBucket`
	// then GCP will auto create / assign one for you. However, you are not guaranteed
	// an auto generated bucket which is solely dedicated to your cluster; it may be shared
	// with other clusters in the same region/zone also choosing to use the auto generation
	// option.
	StagingBucket *string `pulumi:"stagingBucket"`
	// The Cloud Storage temp bucket used to store ephemeral cluster
	// and jobs data, such as Spark and MapReduce history files.
	// Note: If you don't explicitly specify a `tempBucket` then GCP will auto create / assign one for you.
	TempBucket *string `pulumi:"tempBucket"`
	// The Google Compute Engine config settings for the worker instances
	// in a cluster. Structure defined below.
	WorkerConfig *ClusterClusterConfigWorkerConfig `pulumi:"workerConfig"`
}

type ClusterClusterConfigArgs

type ClusterClusterConfigArgs struct {
	// The autoscaling policy config associated with the cluster.
	// Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can
	// only be removed by setting `policyUri = ""`, rather than removing the whole block.
	// Structure defined below.
	AutoscalingConfig ClusterClusterConfigAutoscalingConfigPtrInput `pulumi:"autoscalingConfig"`
	Bucket            pulumi.StringPtrInput                         `pulumi:"bucket"`
	// The Customer managed encryption keys settings for the cluster.
	// Structure defined below.
	EncryptionConfig ClusterClusterConfigEncryptionConfigPtrInput `pulumi:"encryptionConfig"`
	// The config settings for port access on the cluster.
	// Structure defined below.
	// ***
	EndpointConfig ClusterClusterConfigEndpointConfigPtrInput `pulumi:"endpointConfig"`
	// Common config settings for resources of Google Compute Engine cluster
	// instances, applicable to all instances in the cluster. Structure defined below.
	GceClusterConfig ClusterClusterConfigGceClusterConfigPtrInput `pulumi:"gceClusterConfig"`
	// Commands to execute on each node after config is completed.
	// You can specify multiple of these actions. Structure defined below.
	InitializationActions ClusterClusterConfigInitializationActionArrayInput `pulumi:"initializationActions"`
	// The settings for auto deletion cluster schedule.
	// Structure defined below.
	LifecycleConfig ClusterClusterConfigLifecycleConfigPtrInput `pulumi:"lifecycleConfig"`
	// The Google Compute Engine config settings for the master instances
	// in a cluster. Structure defined below.
	MasterConfig ClusterClusterConfigMasterConfigPtrInput `pulumi:"masterConfig"`
	// The Google Compute Engine config settings for the additional (aka
	// preemptible) instances in a cluster. Structure defined below.
	PreemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfigPtrInput `pulumi:"preemptibleWorkerConfig"`
	// Security related configuration. Structure defined below.
	SecurityConfig ClusterClusterConfigSecurityConfigPtrInput `pulumi:"securityConfig"`
	// The config settings for software inside the cluster.
	// Structure defined below.
	SoftwareConfig ClusterClusterConfigSoftwareConfigPtrInput `pulumi:"softwareConfig"`
	// The Cloud Storage staging bucket used to stage files,
	// such as Hadoop jars, between client machines and the cluster.
	// Note: If you don't explicitly specify a `stagingBucket`
	// then GCP will auto create / assign one for you. However, you are not guaranteed
	// an auto generated bucket which is solely dedicated to your cluster; it may be shared
	// with other clusters in the same region/zone also choosing to use the auto generation
	// option.
	StagingBucket pulumi.StringPtrInput `pulumi:"stagingBucket"`
	// The Cloud Storage temp bucket used to store ephemeral cluster
	// and jobs data, such as Spark and MapReduce history files.
	// Note: If you don't explicitly specify a `tempBucket` then GCP will auto create / assign one for you.
	TempBucket pulumi.StringPtrInput `pulumi:"tempBucket"`
	// The Google Compute Engine config settings for the worker instances
	// in a cluster. Structure defined below.
	WorkerConfig ClusterClusterConfigWorkerConfigPtrInput `pulumi:"workerConfig"`
}

func (ClusterClusterConfigArgs) ElementType

func (ClusterClusterConfigArgs) ElementType() reflect.Type

func (ClusterClusterConfigArgs) ToClusterClusterConfigOutput

func (i ClusterClusterConfigArgs) ToClusterClusterConfigOutput() ClusterClusterConfigOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigOutputWithContext

func (i ClusterClusterConfigArgs) ToClusterClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutput

func (i ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutputWithContext

func (i ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

type ClusterClusterConfigAutoscalingConfig

type ClusterClusterConfigAutoscalingConfig struct {
	// The autoscaling policy used by the cluster.
	PolicyUri string `pulumi:"policyUri"`
}
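
As noted above, when `autoscalingConfig` is the only field set in `clusterConfig`, detaching the policy is done by blanking `policyUri` rather than removing the block. A minimal sketch (the cluster name and region are placeholders):

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Keep the autoscalingConfig block, but set an empty policy URI to
		// detach a previously attached autoscaling policy.
		_, err := dataproc.NewCluster(ctx, "basic", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
					PolicyUri: pulumi.String(""),
				},
			},
		})
		return err
	})
}
```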

type ClusterClusterConfigAutoscalingConfigArgs

type ClusterClusterConfigAutoscalingConfigArgs struct {
	// The autoscaling policy used by the cluster.
	PolicyUri pulumi.StringInput `pulumi:"policyUri"`
}

func (ClusterClusterConfigAutoscalingConfigArgs) ElementType

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutput

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutputWithContext

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigInput

type ClusterClusterConfigAutoscalingConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput
	ToClusterClusterConfigAutoscalingConfigOutputWithContext(context.Context) ClusterClusterConfigAutoscalingConfigOutput
}

ClusterClusterConfigAutoscalingConfigInput is an input type that accepts ClusterClusterConfigAutoscalingConfigArgs and ClusterClusterConfigAutoscalingConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigAutoscalingConfigInput` via:

ClusterClusterConfigAutoscalingConfigArgs{...}

type ClusterClusterConfigAutoscalingConfigOutput

type ClusterClusterConfigAutoscalingConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigAutoscalingConfigOutput) ElementType

func (ClusterClusterConfigAutoscalingConfigOutput) PolicyUri

The autoscaling policy used by the cluster.

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutput

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigPtrInput

type ClusterClusterConfigAutoscalingConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput
	ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput
}

ClusterClusterConfigAutoscalingConfigPtrInput is an input type that accepts ClusterClusterConfigAutoscalingConfigArgs, ClusterClusterConfigAutoscalingConfigPtr and ClusterClusterConfigAutoscalingConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigAutoscalingConfigPtrInput` via:

        ClusterClusterConfigAutoscalingConfigArgs{...}

or:

        nil

type ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigAutoscalingConfigPtrOutput) Elem

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ElementType

func (ClusterClusterConfigAutoscalingConfigPtrOutput) PolicyUri

The autoscaling policy used by the cluster.

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (o ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigEncryptionConfig

type ClusterClusterConfigEncryptionConfig struct {
	// The Cloud KMS key name to use for PD disk encryption for
	// all instances in the cluster.
	KmsKeyName string `pulumi:"kmsKeyName"`
}
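
A minimal sketch of enabling customer-managed disk encryption; the KMS key name below is a placeholder, and the key must already exist and be usable by the Dataproc service agent.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Placeholder Cloud KMS key resource name.
		kmsKey := "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"
		_, err := dataproc.NewCluster(ctx, "encryptedCluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				EncryptionConfig: &dataproc.ClusterClusterConfigEncryptionConfigArgs{
					KmsKeyName: pulumi.String(kmsKey),
				},
			},
		})
		return err
	})
}
```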

type ClusterClusterConfigEncryptionConfigArgs

type ClusterClusterConfigEncryptionConfigArgs struct {
	// The Cloud KMS key name to use for PD disk encryption for
	// all instances in the cluster.
	KmsKeyName pulumi.StringInput `pulumi:"kmsKeyName"`
}

func (ClusterClusterConfigEncryptionConfigArgs) ElementType

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutput

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutputWithContext

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutput

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigInput

type ClusterClusterConfigEncryptionConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput
	ToClusterClusterConfigEncryptionConfigOutputWithContext(context.Context) ClusterClusterConfigEncryptionConfigOutput
}

ClusterClusterConfigEncryptionConfigInput is an input type that accepts ClusterClusterConfigEncryptionConfigArgs and ClusterClusterConfigEncryptionConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigEncryptionConfigInput` via:

ClusterClusterConfigEncryptionConfigArgs{...}

type ClusterClusterConfigEncryptionConfigOutput

type ClusterClusterConfigEncryptionConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEncryptionConfigOutput) ElementType

func (ClusterClusterConfigEncryptionConfigOutput) KmsKeyName

The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutput

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutputWithContext

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutput

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigPtrInput

type ClusterClusterConfigEncryptionConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput
	ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(context.Context) ClusterClusterConfigEncryptionConfigPtrOutput
}

ClusterClusterConfigEncryptionConfigPtrInput is an input type that accepts ClusterClusterConfigEncryptionConfigArgs, ClusterClusterConfigEncryptionConfigPtr and ClusterClusterConfigEncryptionConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigEncryptionConfigPtrInput` via:

        ClusterClusterConfigEncryptionConfigArgs{...}

or:

        nil

type ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEncryptionConfigPtrOutput) Elem

func (ClusterClusterConfigEncryptionConfigPtrOutput) ElementType

func (ClusterClusterConfigEncryptionConfigPtrOutput) KmsKeyName

The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutput

func (o ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (o ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEndpointConfig

type ClusterClusterConfigEndpointConfig struct {
	// The flag to enable http access to specific ports
	// on the cluster from external sources (aka Component Gateway). Defaults to false.
	EnableHttpPortAccess bool                   `pulumi:"enableHttpPortAccess"`
	HttpPorts            map[string]interface{} `pulumi:"httpPorts"`
}
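
A minimal sketch of turning on the Component Gateway via `enableHttpPortAccess`; the gateway URLs in `httpPorts` are reported back by GCP once the cluster is created.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Expose the web UIs of cluster components through the Component Gateway.
		_, err := dataproc.NewCluster(ctx, "gatewayCluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				EndpointConfig: &dataproc.ClusterClusterConfigEndpointConfigArgs{
					EnableHttpPortAccess: pulumi.Bool(true),
				},
			},
		})
		return err
	})
}
```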

type ClusterClusterConfigEndpointConfigArgs

type ClusterClusterConfigEndpointConfigArgs struct {
	// The flag to enable http access to specific ports
	// on the cluster from external sources (aka Component Gateway). Defaults to false.
	EnableHttpPortAccess pulumi.BoolInput `pulumi:"enableHttpPortAccess"`
	HttpPorts            pulumi.MapInput  `pulumi:"httpPorts"`
}

func (ClusterClusterConfigEndpointConfigArgs) ElementType

func (ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigOutput

func (i ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigOutput() ClusterClusterConfigEndpointConfigOutput

func (ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigOutputWithContext

func (i ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigOutput

func (ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigPtrOutput

func (i ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigPtrOutput() ClusterClusterConfigEndpointConfigPtrOutput

func (ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigPtrOutputWithContext

func (i ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigPtrOutput

type ClusterClusterConfigEndpointConfigInput

type ClusterClusterConfigEndpointConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigEndpointConfigOutput() ClusterClusterConfigEndpointConfigOutput
	ToClusterClusterConfigEndpointConfigOutputWithContext(context.Context) ClusterClusterConfigEndpointConfigOutput
}

ClusterClusterConfigEndpointConfigInput is an input type that accepts ClusterClusterConfigEndpointConfigArgs and ClusterClusterConfigEndpointConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigEndpointConfigInput` via:

ClusterClusterConfigEndpointConfigArgs{...}

type ClusterClusterConfigEndpointConfigOutput

type ClusterClusterConfigEndpointConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEndpointConfigOutput) ElementType

func (ClusterClusterConfigEndpointConfigOutput) EnableHttpPortAccess

The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.

func (ClusterClusterConfigEndpointConfigOutput) HttpPorts

func (ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigOutput

func (o ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigOutput() ClusterClusterConfigEndpointConfigOutput

func (ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigOutputWithContext

func (o ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigOutput

func (ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigPtrOutput

func (o ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigPtrOutput() ClusterClusterConfigEndpointConfigPtrOutput

func (ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigPtrOutputWithContext

func (o ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigPtrOutput

type ClusterClusterConfigEndpointConfigPtrInput

type ClusterClusterConfigEndpointConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigEndpointConfigPtrOutput() ClusterClusterConfigEndpointConfigPtrOutput
	ToClusterClusterConfigEndpointConfigPtrOutputWithContext(context.Context) ClusterClusterConfigEndpointConfigPtrOutput
}

ClusterClusterConfigEndpointConfigPtrInput is an input type that accepts ClusterClusterConfigEndpointConfigArgs, ClusterClusterConfigEndpointConfigPtr and ClusterClusterConfigEndpointConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigEndpointConfigPtrInput` via:

        ClusterClusterConfigEndpointConfigArgs{...}

or:

        nil

type ClusterClusterConfigEndpointConfigPtrOutput

type ClusterClusterConfigEndpointConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEndpointConfigPtrOutput) Elem

func (ClusterClusterConfigEndpointConfigPtrOutput) ElementType

func (ClusterClusterConfigEndpointConfigPtrOutput) EnableHttpPortAccess

The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.

func (ClusterClusterConfigEndpointConfigPtrOutput) HttpPorts

func (ClusterClusterConfigEndpointConfigPtrOutput) ToClusterClusterConfigEndpointConfigPtrOutput

func (o ClusterClusterConfigEndpointConfigPtrOutput) ToClusterClusterConfigEndpointConfigPtrOutput() ClusterClusterConfigEndpointConfigPtrOutput

func (ClusterClusterConfigEndpointConfigPtrOutput) ToClusterClusterConfigEndpointConfigPtrOutputWithContext

func (o ClusterClusterConfigEndpointConfigPtrOutput) ToClusterClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigPtrOutput

type ClusterClusterConfigGceClusterConfig

type ClusterClusterConfigGceClusterConfig struct {
	// By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance. If set to true, all
	// instances in the cluster will only have internal IP addresses. Note: Private Google Access
	// (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster
	// will be launched in.
	InternalIpOnly *bool `pulumi:"internalIpOnly"`
	// A map of the Compute Engine metadata entries to add to all instances
	// (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `pulumi:"metadata"`
	// The name or selfLink of the Google Compute Engine
	// network the cluster will be part of. Conflicts with `subnetwork`.
	// If neither is specified, this defaults to the "default" network.
	Network *string `pulumi:"network"`
	// The service account to be used by the Node VMs.
	// If not specified, the "default" service account is used.
	ServiceAccount *string `pulumi:"serviceAccount"`
	// The set of Google API scopes
	// to be made available on all of the node VMs under the `serviceAccount`
	// specified. Both OAuth2 URLs and gcloud
	// short names are supported. To allow full access to all Cloud APIs, use the
	// `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes).
	ServiceAccountScopes []string `pulumi:"serviceAccountScopes"`
	// The name or selfLink of the Google Compute Engine
	// subnetwork the cluster will be part of. Conflicts with `network`.
	Subnetwork *string `pulumi:"subnetwork"`
	// The list of instance tags applied to instances in the cluster.
	// Tags are used to identify valid sources or targets for network firewalls.
	Tags []string `pulumi:"tags"`
	// The GCP zone where your data is stored and used (i.e. where
	// the master and the worker nodes will be created). If `region` is set to 'global' (default)
	// then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone)
	// to determine this automatically for you.
	// Note: This setting additionally determines and restricts
	// which computing resources are available for use with other configs such as
	// `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.
	Zone *string `pulumi:"zone"`
}
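
A minimal sketch of a cluster restricted to internal IP addresses on an existing subnetwork; the subnetwork name is a placeholder and, per the note above, Private Google Access must already be enabled on it.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "privateCluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					// Instances get internal IPs only; the subnetwork (a placeholder
					// name here) must have Private Google Access enabled.
					InternalIpOnly: pulumi.Bool(true),
					Subnetwork:     pulumi.String("my-private-subnetwork"),
					Tags: pulumi.StringArray{
						pulumi.String("dataproc"),
					},
				},
			},
		})
		return err
	})
}
```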

type ClusterClusterConfigGceClusterConfigArgs

type ClusterClusterConfigGceClusterConfigArgs struct {
	// By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance. If set to true, all
	// instances in the cluster will only have internal IP addresses. Note: Private Google Access
	// (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster
	// will be launched in.
	InternalIpOnly pulumi.BoolPtrInput `pulumi:"internalIpOnly"`
	// A map of the Compute Engine metadata entries to add to all instances
	// (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata pulumi.StringMapInput `pulumi:"metadata"`
	// The name or selfLink of the Google Compute Engine
	// network the cluster will be part of. Conflicts with `subnetwork`.
	// If neither is specified, this defaults to the "default" network.
	Network pulumi.StringPtrInput `pulumi:"network"`
	// The service account to be used by the Node VMs.
	// If not specified, the "default" service account is used.
	ServiceAccount pulumi.StringPtrInput `pulumi:"serviceAccount"`
	// The set of Google API scopes
	// to be made available on all of the node VMs under the `serviceAccount`
	// specified. Both OAuth2 URLs and gcloud
	// short names are supported. To allow full access to all Cloud APIs, use the
	// `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes).
	ServiceAccountScopes pulumi.StringArrayInput `pulumi:"serviceAccountScopes"`
	// The name or selfLink of the Google Compute Engine
	// subnetwork the cluster will be part of. Conflicts with `network`.
	Subnetwork pulumi.StringPtrInput `pulumi:"subnetwork"`
	// The list of instance tags applied to instances in the cluster.
	// Tags are used to identify valid sources or targets for network firewalls.
	Tags pulumi.StringArrayInput `pulumi:"tags"`
	// The GCP zone where your data is stored and used (i.e. where
	// the master and the worker nodes will be created). If `region` is set to 'global' (default)
	// then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone)
	// to determine this automatically for you.
	// Note: This setting additionally determines and restricts
	// which computing resources are available for use with other configs such as
	// `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.
	Zone pulumi.StringPtrInput `pulumi:"zone"`
}

func (ClusterClusterConfigGceClusterConfigArgs) ElementType

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutput

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutputWithContext

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutput

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

type ClusterClusterConfigGceClusterConfigInput

type ClusterClusterConfigGceClusterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput
	ToClusterClusterConfigGceClusterConfigOutputWithContext(context.Context) ClusterClusterConfigGceClusterConfigOutput
}

ClusterClusterConfigGceClusterConfigInput is an input type that accepts ClusterClusterConfigGceClusterConfigArgs and ClusterClusterConfigGceClusterConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigGceClusterConfigInput` via:

ClusterClusterConfigGceClusterConfigArgs{...}

type ClusterClusterConfigGceClusterConfigOutput

type ClusterClusterConfigGceClusterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigGceClusterConfigOutput) ElementType

func (ClusterClusterConfigGceClusterConfigOutput) InternalIpOnly

By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster will be launched in.

func (ClusterClusterConfigGceClusterConfigOutput) Metadata

A map of the Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (ClusterClusterConfigGceClusterConfigOutput) Network

The name or selfLink of the Google Compute Engine network the cluster will be part of. Conflicts with `subnetwork`. If neither is specified, this defaults to the "default" network.

func (ClusterClusterConfigGceClusterConfigOutput) ServiceAccount

The service account to be used by the Node VMs. If not specified, the "default" service account is used.

func (ClusterClusterConfigGceClusterConfigOutput) ServiceAccountScopes

The set of Google API scopes to be made available on all of the node VMs under the `serviceAccount` specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes).

func (ClusterClusterConfigGceClusterConfigOutput) Subnetwork

The name or selfLink of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with `network`.

func (ClusterClusterConfigGceClusterConfigOutput) Tags

The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutput

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutputWithContext

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutput

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigOutput) Zone

The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created). If `region` is set to 'global' (default) then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone) to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.

type ClusterClusterConfigGceClusterConfigPtrInput

type ClusterClusterConfigGceClusterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput
	ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigGceClusterConfigPtrOutput
}

ClusterClusterConfigGceClusterConfigPtrInput is an input type that accepts ClusterClusterConfigGceClusterConfigArgs, ClusterClusterConfigGceClusterConfigPtr and ClusterClusterConfigGceClusterConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigGceClusterConfigPtrInput` via:

        ClusterClusterConfigGceClusterConfigArgs{...}

or:

        nil

type ClusterClusterConfigGceClusterConfigPtrOutput

type ClusterClusterConfigGceClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigGceClusterConfigPtrOutput) Elem

func (ClusterClusterConfigGceClusterConfigPtrOutput) ElementType

func (ClusterClusterConfigGceClusterConfigPtrOutput) InternalIpOnly

By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster will be launched in.

func (ClusterClusterConfigGceClusterConfigPtrOutput) Metadata

A map of the Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (ClusterClusterConfigGceClusterConfigPtrOutput) Network

The name or selfLink of the Google Compute Engine network the cluster will be part of. Conflicts with `subnetwork`. If neither is specified, this defaults to the "default" network.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ServiceAccount

The service account to be used by the Node VMs. If not specified, the "default" service account is used.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ServiceAccountScopes

The set of Google API scopes to be made available on all of the node VMs under the `serviceAccount` specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes).

func (ClusterClusterConfigGceClusterConfigPtrOutput) Subnetwork

The name or selfLink of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with `network`.

func (ClusterClusterConfigGceClusterConfigPtrOutput) Tags

The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutput

func (o ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigPtrOutput) Zone

The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created). If `region` is set to 'global' (default) then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone) to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.

type ClusterClusterConfigInitializationAction

type ClusterClusterConfigInitializationAction struct {
	// The script to be executed during initialization of the cluster.
	// The script must be a GCS file with a gs:// prefix.
	Script string `pulumi:"script"`
	// The maximum duration (in seconds) which `script` is
	// allowed to take to execute its action. GCP will default to a predetermined
	// computed value if not set (currently 300).
	TimeoutSec *int `pulumi:"timeoutSec"`
}

type ClusterClusterConfigInitializationActionArgs

type ClusterClusterConfigInitializationActionArgs struct {
	// The script to be executed during initialization of the cluster.
	// The script must be a GCS file with a gs:// prefix.
	Script pulumi.StringInput `pulumi:"script"`
	// The maximum duration (in seconds) which `script` is
	// allowed to take to execute its action. GCP will default to a predetermined
	// computed value if not set (currently 300).
	TimeoutSec pulumi.IntPtrInput `pulumi:"timeoutSec"`
}

func (ClusterClusterConfigInitializationActionArgs) ElementType

func (ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutput

func (i ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput

func (ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutputWithContext

func (i ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInitializationActionArray

type ClusterClusterConfigInitializationActionArray []ClusterClusterConfigInitializationActionInput

func (ClusterClusterConfigInitializationActionArray) ElementType

func (ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutput

func (i ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput

func (ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutputWithContext

func (i ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionArrayInput

type ClusterClusterConfigInitializationActionArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput
	ToClusterClusterConfigInitializationActionArrayOutputWithContext(context.Context) ClusterClusterConfigInitializationActionArrayOutput
}

ClusterClusterConfigInitializationActionArrayInput is an input type that accepts ClusterClusterConfigInitializationActionArray and ClusterClusterConfigInitializationActionArrayOutput values. You can construct a concrete instance of `ClusterClusterConfigInitializationActionArrayInput` via:

ClusterClusterConfigInitializationActionArray{ ClusterClusterConfigInitializationActionArgs{...} }

type ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigInitializationActionArrayOutput) ElementType

func (ClusterClusterConfigInitializationActionArrayOutput) Index

func (ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutput

func (o ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput

func (ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutputWithContext

func (o ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionInput

type ClusterClusterConfigInitializationActionInput interface {
	pulumi.Input

	ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput
	ToClusterClusterConfigInitializationActionOutputWithContext(context.Context) ClusterClusterConfigInitializationActionOutput
}

ClusterClusterConfigInitializationActionInput is an input type that accepts ClusterClusterConfigInitializationActionArgs and ClusterClusterConfigInitializationActionOutput values. You can construct a concrete instance of `ClusterClusterConfigInitializationActionInput` via:

ClusterClusterConfigInitializationActionArgs{...}

type ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInitializationActionOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigInitializationActionOutput) ElementType

func (ClusterClusterConfigInitializationActionOutput) Script

The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.

func (ClusterClusterConfigInitializationActionOutput) TimeoutSec

The maximum duration (in seconds) which `script` is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).

func (ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutput

func (o ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput

func (ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutputWithContext

func (o ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInput

type ClusterClusterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigOutput() ClusterClusterConfigOutput
	ToClusterClusterConfigOutputWithContext(context.Context) ClusterClusterConfigOutput
}

ClusterClusterConfigInput is an input type that accepts ClusterClusterConfigArgs and ClusterClusterConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigInput` via:

ClusterClusterConfigArgs{...}

type ClusterClusterConfigLifecycleConfig

type ClusterClusterConfigLifecycleConfig struct {
	// The time when the cluster will be auto-deleted.
	// A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
	// Example: "2014-10-02T15:01:23.045123456Z".
	AutoDeleteTime *string `pulumi:"autoDeleteTime"`
	// The duration to keep the cluster alive while idling
	// (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
	IdleDeleteTtl *string `pulumi:"idleDeleteTtl"`
	IdleStartTime *string `pulumi:"idleStartTime"`
}
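
A minimal sketch of scheduled deletion: the cluster is removed after ten minutes of idling, and no later than the fixed deadline below (the timestamp is a placeholder).

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "ephemeralCluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				LifecycleConfig: &dataproc.ClusterClusterConfigLifecycleConfigArgs{
					// Delete after 10 minutes with no running jobs (duration in seconds
					// format; valid range is [10m, 14d]).
					IdleDeleteTtl: pulumi.String("600s"),
					// Placeholder RFC3339 timestamp for the hard deletion deadline.
					AutoDeleteTime: pulumi.String("2022-01-01T12:00:00.000Z"),
				},
			},
		})
		return err
	})
}
```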

type ClusterClusterConfigLifecycleConfigArgs

type ClusterClusterConfigLifecycleConfigArgs struct {
	// The time when the cluster will be auto-deleted.
	// A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
	// Example: "2014-10-02T15:01:23.045123456Z".
	AutoDeleteTime pulumi.StringPtrInput `pulumi:"autoDeleteTime"`
	// The duration to keep the cluster alive while idling
	// (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
	IdleDeleteTtl pulumi.StringPtrInput `pulumi:"idleDeleteTtl"`
	IdleStartTime pulumi.StringPtrInput `pulumi:"idleStartTime"`
}

func (ClusterClusterConfigLifecycleConfigArgs) ElementType

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutput

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutputWithContext

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutput

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigInput

type ClusterClusterConfigLifecycleConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput
	ToClusterClusterConfigLifecycleConfigOutputWithContext(context.Context) ClusterClusterConfigLifecycleConfigOutput
}

ClusterClusterConfigLifecycleConfigInput is an input type that accepts ClusterClusterConfigLifecycleConfigArgs and ClusterClusterConfigLifecycleConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigLifecycleConfigInput` via:

ClusterClusterConfigLifecycleConfigArgs{...}

type ClusterClusterConfigLifecycleConfigOutput

type ClusterClusterConfigLifecycleConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigLifecycleConfigOutput) AutoDeleteTime

The time when the cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".

func (ClusterClusterConfigLifecycleConfigOutput) ElementType

func (ClusterClusterConfigLifecycleConfigOutput) IdleDeleteTtl

The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].

func (ClusterClusterConfigLifecycleConfigOutput) IdleStartTime

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutput

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutputWithContext

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutput

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigPtrInput

type ClusterClusterConfigLifecycleConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput
	ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(context.Context) ClusterClusterConfigLifecycleConfigPtrOutput
}

ClusterClusterConfigLifecycleConfigPtrInput is an input type that accepts ClusterClusterConfigLifecycleConfigArgs, ClusterClusterConfigLifecycleConfigPtr and ClusterClusterConfigLifecycleConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigLifecycleConfigPtrInput` via:

        ClusterClusterConfigLifecycleConfigArgs{...}

or:

        nil

type ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigLifecycleConfigPtrOutput) AutoDeleteTime

The time when the cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".

func (ClusterClusterConfigLifecycleConfigPtrOutput) Elem

func (ClusterClusterConfigLifecycleConfigPtrOutput) ElementType

func (ClusterClusterConfigLifecycleConfigPtrOutput) IdleDeleteTtl

The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].

func (ClusterClusterConfigLifecycleConfigPtrOutput) IdleStartTime

func (ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutput

func (o ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (o ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput
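
For reference, a minimal, hedged sketch of wiring this lifecycle block into a cluster, in the same style as the example usage above; the resource name, region, and `600s` TTL are illustrative placeholders rather than values documented by this package:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "ephemeral", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				LifecycleConfig: &dataproc.ClusterClusterConfigLifecycleConfigArgs{
					// Delete the cluster after 10 minutes with no running jobs (valid range [10m, 14d]).
					IdleDeleteTtl: pulumi.String("600s"),
					// Or pin an absolute expiry instead, as an RFC3339 UTC "Zulu" timestamp:
					// AutoDeleteTime: pulumi.String("2014-10-02T15:01:23.045123456Z"),
				},
			},
		})
		return err
	})
}
```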

type ClusterClusterConfigMasterConfig

type ClusterClusterConfigMasterConfig struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators []ClusterClusterConfigMasterConfigAccelerator `pulumi:"accelerators"`
	// Disk Config
	DiskConfig *ClusterClusterConfigMasterConfigDiskConfig `pulumi:"diskConfig"`
	// The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      *string  `pulumi:"imageUri"`
	InstanceNames []string `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the master node. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType *string `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the master. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Specifies the number of master nodes to create. If not specified,
	// GCP will default to a predetermined computed value (currently 1).
	NumInstances *int `pulumi:"numInstances"`
}

type ClusterClusterConfigMasterConfigAccelerator

type ClusterClusterConfigMasterConfigAccelerator struct {
	// The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount int `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType string `pulumi:"acceleratorType"`
}

type ClusterClusterConfigMasterConfigAcceleratorArgs

type ClusterClusterConfigMasterConfigAcceleratorArgs struct {
	// The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount pulumi.IntInput `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringInput `pulumi:"acceleratorType"`
}

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutput

func (i ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext

func (i ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigAcceleratorArray

type ClusterClusterConfigMasterConfigAcceleratorArray []ClusterClusterConfigMasterConfigAcceleratorInput

func (ClusterClusterConfigMasterConfigAcceleratorArray) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (i ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput() ClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext

func (i ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorArrayInput

type ClusterClusterConfigMasterConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigAcceleratorArrayOutput() ClusterClusterConfigMasterConfigAcceleratorArrayOutput
	ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput
}

ClusterClusterConfigMasterConfigAcceleratorArrayInput is an input type that accepts ClusterClusterConfigMasterConfigAcceleratorArray and ClusterClusterConfigMasterConfigAcceleratorArrayOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigAcceleratorArrayInput` via:

ClusterClusterConfigMasterConfigAcceleratorArray{ ClusterClusterConfigMasterConfigAcceleratorArgs{...} }

type ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) Index

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext

func (o ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorInput

type ClusterClusterConfigMasterConfigAcceleratorInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput
	ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput
}

ClusterClusterConfigMasterConfigAcceleratorInput is an input type that accepts ClusterClusterConfigMasterConfigAcceleratorArgs and ClusterClusterConfigMasterConfigAcceleratorOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigAcceleratorInput` via:

ClusterClusterConfigMasterConfigAcceleratorArgs{...}

type ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigAcceleratorOutput) AcceleratorCount

The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.

func (ClusterClusterConfigMasterConfigAcceleratorOutput) AcceleratorType

The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutput

func (o ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext

func (o ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigArgs

type ClusterClusterConfigMasterConfigArgs struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators ClusterClusterConfigMasterConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Disk Config
	DiskConfig ClusterClusterConfigMasterConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      pulumi.StringPtrInput   `pulumi:"imageUri"`
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the master node. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the master. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Specifies the number of master nodes to create. If not specified,
	// GCP will default to a predetermined computed value (currently 1).
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
}

func (ClusterClusterConfigMasterConfigArgs) ElementType

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutput

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutputWithContext

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutput

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfig

type ClusterClusterConfigMasterConfigDiskConfig struct {
	// Size of the primary disk attached to each master node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each master node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each master node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigMasterConfigDiskConfigArgs

type ClusterClusterConfigMasterConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each master node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each master node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each master node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}
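
A hedged sketch of combining the master config with its accelerator and disk blocks, following the package's example style; the machine type, disk sizes, and accelerator type below are illustrative placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "accelerated", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					NumInstances: pulumi.Int(1),
					MachineType:  pulumi.String("n1-standard-4"),
					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
						BootDiskType:   pulumi.String("pd-ssd"),
						BootDiskSizeGb: pulumi.Int(100),
					},
					// One K80 attached to the master; the zone must offer this accelerator type.
					Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
						dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
							AcceleratorCount: pulumi.Int(1),
						},
					},
				},
			},
		})
		return err
	})
}
```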

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutput

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigInput

type ClusterClusterConfigMasterConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput
	ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput
}

ClusterClusterConfigMasterConfigDiskConfigInput is an input type that accepts ClusterClusterConfigMasterConfigDiskConfigArgs and ClusterClusterConfigMasterConfigDiskConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigDiskConfigInput` via:

ClusterClusterConfigMasterConfigDiskConfigArgs{...}

type ClusterClusterConfigMasterConfigDiskConfigOutput

type ClusterClusterConfigMasterConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each master node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each master node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each master node. Defaults to 0.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutput

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigPtrInput

type ClusterClusterConfigMasterConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput
	ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput
}

ClusterClusterConfigMasterConfigDiskConfigPtrInput is an input type that accepts ClusterClusterConfigMasterConfigDiskConfigArgs, ClusterClusterConfigMasterConfigDiskConfigPtr and ClusterClusterConfigMasterConfigDiskConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigDiskConfigPtrInput` via:

        ClusterClusterConfigMasterConfigDiskConfigArgs{...}

or:

        nil

type ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each master node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each master node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each master node. Defaults to 0.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (o ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigInput

type ClusterClusterConfigMasterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput
	ToClusterClusterConfigMasterConfigOutputWithContext(context.Context) ClusterClusterConfigMasterConfigOutput
}

ClusterClusterConfigMasterConfigInput is an input type that accepts ClusterClusterConfigMasterConfigArgs and ClusterClusterConfigMasterConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigInput` via:

ClusterClusterConfigMasterConfigArgs{...}

type ClusterClusterConfigMasterConfigOutput

type ClusterClusterConfigMasterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigMasterConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigMasterConfigOutput) ElementType

func (ClusterClusterConfigMasterConfigOutput) ImageUri

The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigMasterConfigOutput) InstanceNames

func (ClusterClusterConfigMasterConfigOutput) MachineType

The name of a Google Compute Engine machine type to create for the master node. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigMasterConfigOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigMasterConfigOutput) NumInstances

Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutput

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutputWithContext

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutput

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigPtrInput

type ClusterClusterConfigMasterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput
	ToClusterClusterConfigMasterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigMasterConfigPtrOutput
}

ClusterClusterConfigMasterConfigPtrInput is an input type that accepts ClusterClusterConfigMasterConfigArgs, ClusterClusterConfigMasterConfigPtr and ClusterClusterConfigMasterConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigPtrInput` via:

        ClusterClusterConfigMasterConfigArgs{...}

or:

        nil

type ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigPtrOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigMasterConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigMasterConfigPtrOutput) Elem

func (ClusterClusterConfigMasterConfigPtrOutput) ElementType

func (ClusterClusterConfigMasterConfigPtrOutput) ImageUri

The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigMasterConfigPtrOutput) InstanceNames

func (ClusterClusterConfigMasterConfigPtrOutput) MachineType

The name of a Google Compute Engine machine type to create for the master node. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigMasterConfigPtrOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigMasterConfigPtrOutput) NumInstances

Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).

func (ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutput

func (o ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigOutput

type ClusterClusterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigOutput) AutoscalingConfig

The autoscaling policy config associated with the cluster. Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can only be removed by setting `policyUri = ""`, rather than removing the whole block. Structure defined below.

func (ClusterClusterConfigOutput) Bucket

func (ClusterClusterConfigOutput) ElementType

func (ClusterClusterConfigOutput) ElementType() reflect.Type

func (ClusterClusterConfigOutput) EncryptionConfig

The Customer managed encryption keys settings for the cluster. Structure defined below.

func (ClusterClusterConfigOutput) EndpointConfig

The config settings for port access on the cluster. Structure defined below.

func (ClusterClusterConfigOutput) GceClusterConfig

Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.

func (ClusterClusterConfigOutput) InitializationActions

Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.

func (ClusterClusterConfigOutput) LifecycleConfig

The settings for the cluster's auto-deletion schedule. Structure defined below.

func (ClusterClusterConfigOutput) MasterConfig

The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.

func (ClusterClusterConfigOutput) PreemptibleWorkerConfig

The Google Compute Engine config settings for the additional (aka preemptible) instances in a cluster. Structure defined below.

func (ClusterClusterConfigOutput) SecurityConfig

Security related configuration. Structure defined below.

func (ClusterClusterConfigOutput) SoftwareConfig

The config settings for software inside the cluster. Structure defined below.

func (ClusterClusterConfigOutput) StagingBucket

The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a `stagingBucket`, GCP will auto-create or assign one for you. However, you are not guaranteed an auto-generated bucket that is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone that also use the auto-generation option.

func (ClusterClusterConfigOutput) TempBucket added in v4.10.0

The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a `tempBucket`, GCP will auto-create or assign one for you.

func (ClusterClusterConfigOutput) ToClusterClusterConfigOutput

func (o ClusterClusterConfigOutput) ToClusterClusterConfigOutput() ClusterClusterConfigOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigOutputWithContext

func (o ClusterClusterConfigOutput) ToClusterClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutput

func (o ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

func (ClusterClusterConfigOutput) WorkerConfig

The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.
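
Because fields such as `bucket` and `stagingBucket` are resolved by GCP after creation, they are read back through these output getters. A minimal sketch, assuming the `Cluster` resource exposes its resolved configuration as the `ClusterConfig` property (the resource name and region are placeholders):

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cluster, err := dataproc.NewCluster(ctx, "simple", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		// Bucket and StagingBucket resolve once GCP has created or assigned them.
		ctx.Export("autoCreatedBucket", cluster.ClusterConfig.Bucket())
		ctx.Export("stagingBucket", cluster.ClusterConfig.StagingBucket())
		return nil
	})
}
```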

type ClusterClusterConfigPreemptibleWorkerConfig

type ClusterClusterConfigPreemptibleWorkerConfig struct {
	// Disk Config
	DiskConfig    *ClusterClusterConfigPreemptibleWorkerConfigDiskConfig `pulumi:"diskConfig"`
	InstanceNames []string                                               `pulumi:"instanceNames"`
	// Specifies the number of preemptible nodes to create.
	// Defaults to 0.
	NumInstances *int `pulumi:"numInstances"`
}

type ClusterClusterConfigPreemptibleWorkerConfigArgs

type ClusterClusterConfigPreemptibleWorkerConfigArgs struct {
	// Disk Config
	DiskConfig    ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	InstanceNames pulumi.StringArrayInput                                       `pulumi:"instanceNames"`
	// Specifies the number of preemptible nodes to create.
	// Defaults to 0.
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
}

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutput

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfig

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfig struct {
	// Size of the primary disk attached to each preemptible worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each preemptible worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each preemptible worker node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each preemptible worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each preemptible worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each preemptible worker node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}
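
A short, hedged sketch of adding a preemptible (secondary) worker group to a cluster, mirroring the package's example style; the instance count and disk values are illustrative placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "burst", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
					// Two preemptible workers on top of the primary worker group.
					NumInstances: pulumi.Int(2),
					DiskConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{
						BootDiskType:   pulumi.String("pd-standard"),
						BootDiskSizeGb: pulumi.Int(50),
					},
				},
			},
		})
		return err
	})
}
```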

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput
	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput
}

ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput is an input type that accepts ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs and ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput` via:

ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{...}

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each preemptible worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput
	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput
}

ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput is an input type that accepts ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs, ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtr and ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput` via:

        ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{...}

or:

        nil

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each preemptible worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigInput

type ClusterClusterConfigPreemptibleWorkerConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput
	ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput
}

ClusterClusterConfigPreemptibleWorkerConfigInput is an input type that accepts ClusterClusterConfigPreemptibleWorkerConfigArgs and ClusterClusterConfigPreemptibleWorkerConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigPreemptibleWorkerConfigInput` via:

ClusterClusterConfigPreemptibleWorkerConfigArgs{...}

type ClusterClusterConfigPreemptibleWorkerConfigOutput

type ClusterClusterConfigPreemptibleWorkerConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) InstanceNames

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) NumInstances

Specifies the number of preemptible nodes to create. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutput

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigPtrInput

type ClusterClusterConfigPreemptibleWorkerConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput
	ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput
}

ClusterClusterConfigPreemptibleWorkerConfigPtrInput is an input type that accepts ClusterClusterConfigPreemptibleWorkerConfigArgs, ClusterClusterConfigPreemptibleWorkerConfigPtr and ClusterClusterConfigPreemptibleWorkerConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigPreemptibleWorkerConfigPtrInput` via:

        ClusterClusterConfigPreemptibleWorkerConfigArgs{...}

or:

        nil

type ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) Elem

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) InstanceNames

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) NumInstances

Specifies the number of preemptible nodes to create. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPtrInput

type ClusterClusterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput
	ToClusterClusterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPtrOutput
}

ClusterClusterConfigPtrInput is an input type that accepts ClusterClusterConfigArgs, ClusterClusterConfigPtr and ClusterClusterConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigPtrInput` via:

        ClusterClusterConfigArgs{...}

or:

        nil

type ClusterClusterConfigPtrOutput

type ClusterClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPtrOutput) AutoscalingConfig

The autoscaling policy config associated with the cluster. Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can only be removed by setting `policyUri = ""`, rather than removing the whole block. Structure defined below.

func (ClusterClusterConfigPtrOutput) Bucket

func (ClusterClusterConfigPtrOutput) Elem

func (ClusterClusterConfigPtrOutput) ElementType

func (ClusterClusterConfigPtrOutput) EncryptionConfig

The Customer managed encryption keys settings for the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) EndpointConfig

The config settings for port access on the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) GceClusterConfig

Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) InitializationActions

Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.

func (ClusterClusterConfigPtrOutput) LifecycleConfig

The settings for the cluster's auto-deletion schedule. Structure defined below.

func (ClusterClusterConfigPtrOutput) MasterConfig

The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) PreemptibleWorkerConfig

The Google Compute Engine config settings for the additional (aka preemptible) instances in a cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) SecurityConfig

Security related configuration. Structure defined below.

func (ClusterClusterConfigPtrOutput) SoftwareConfig

The config settings for software inside the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) StagingBucket

The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a `stagingBucket`, GCP will auto-create or assign one for you. However, you are not guaranteed an auto-generated bucket that is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone that also use the auto-generation option.

func (ClusterClusterConfigPtrOutput) TempBucket added in v4.10.0

The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a `tempBucket`, GCP will auto-create or assign one for you.

func (ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutput

func (o ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

func (ClusterClusterConfigPtrOutput) WorkerConfig

The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.

type ClusterClusterConfigSecurityConfig

type ClusterClusterConfigSecurityConfig struct {
	// Kerberos Configuration
	KerberosConfig ClusterClusterConfigSecurityConfigKerberosConfig `pulumi:"kerberosConfig"`
}

type ClusterClusterConfigSecurityConfigArgs

type ClusterClusterConfigSecurityConfigArgs struct {
	// Kerberos Configuration
	KerberosConfig ClusterClusterConfigSecurityConfigKerberosConfigInput `pulumi:"kerberosConfig"`
}

func (ClusterClusterConfigSecurityConfigArgs) ElementType

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutput

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutputWithContext

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutput

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigInput

type ClusterClusterConfigSecurityConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput
	ToClusterClusterConfigSecurityConfigOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigOutput
}

ClusterClusterConfigSecurityConfigInput is an input type that accepts ClusterClusterConfigSecurityConfigArgs and ClusterClusterConfigSecurityConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigSecurityConfigInput` via:

ClusterClusterConfigSecurityConfigArgs{...}

type ClusterClusterConfigSecurityConfigKerberosConfig

type ClusterClusterConfigSecurityConfigKerberosConfig struct {
	// The admin server (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer *string `pulumi:"crossRealmTrustAdminServer"`
	// The KDC (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc *string `pulumi:"crossRealmTrustKdc"`
	// The remote realm the Dataproc on-cluster KDC will
	// trust, should the user enable cross realm trust.
	CrossRealmTrustRealm *string `pulumi:"crossRealmTrustRealm"`
	// The Cloud Storage URI of a KMS
	// encrypted file containing the shared password between the on-cluster Kerberos realm
	// and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri *string `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Flag to indicate whether to Kerberize the cluster.
	EnableKerberos *bool `pulumi:"enableKerberos"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the master key of the KDC database.
	KdcDbKeyUri *string `pulumi:"kdcDbKeyUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided key. For the self-signed certificate, this password
	// is generated by Dataproc.
	KeyPasswordUri *string `pulumi:"keyPasswordUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided keystore. For the self-signed certificate, this password
	// is generated by Dataproc.
	KeystorePasswordUri *string `pulumi:"keystorePasswordUri"`
	// The Cloud Storage URI of the keystore file used for SSL encryption.
	// If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri *string `pulumi:"keystoreUri"`
	// The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri string `pulumi:"kmsKeyUri"`
	// The name of the on-cluster Kerberos realm. If not specified, the
	// uppercased domain of hostnames will be the realm.
	Realm *string `pulumi:"realm"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the root principal password.
	RootPrincipalPasswordUri string `pulumi:"rootPrincipalPasswordUri"`
	// The lifetime of the ticket granting ticket, in hours.
	TgtLifetimeHours *int `pulumi:"tgtLifetimeHours"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the password to the user provided truststore. For the self-signed
	// certificate, this password is generated by Dataproc.
	TruststorePasswordUri *string `pulumi:"truststorePasswordUri"`
	// The Cloud Storage URI of the truststore file used for
	// SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri *string `pulumi:"truststoreUri"`
}

type ClusterClusterConfigSecurityConfigKerberosConfigArgs

type ClusterClusterConfigSecurityConfigKerberosConfigArgs struct {
	// The admin server (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer pulumi.StringPtrInput `pulumi:"crossRealmTrustAdminServer"`
	// The KDC (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc pulumi.StringPtrInput `pulumi:"crossRealmTrustKdc"`
	// The remote realm the Dataproc on-cluster KDC will
	// trust, should the user enable cross realm trust.
	CrossRealmTrustRealm pulumi.StringPtrInput `pulumi:"crossRealmTrustRealm"`
	// The Cloud Storage URI of a KMS
	// encrypted file containing the shared password between the on-cluster Kerberos realm
	// and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri pulumi.StringPtrInput `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Flag to indicate whether to Kerberize the cluster.
	EnableKerberos pulumi.BoolPtrInput `pulumi:"enableKerberos"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the master key of the KDC database.
	KdcDbKeyUri pulumi.StringPtrInput `pulumi:"kdcDbKeyUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided key. For the self-signed certificate, this password
	// is generated by Dataproc.
	KeyPasswordUri pulumi.StringPtrInput `pulumi:"keyPasswordUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided keystore. For the self-signed certificate, this password
	// is generated by Dataproc.
	KeystorePasswordUri pulumi.StringPtrInput `pulumi:"keystorePasswordUri"`
	// The Cloud Storage URI of the keystore file used for SSL encryption.
	// If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri pulumi.StringPtrInput `pulumi:"keystoreUri"`
	// The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri pulumi.StringInput `pulumi:"kmsKeyUri"`
	// The name of the on-cluster Kerberos realm. If not specified, the
	// uppercased domain of hostnames will be the realm.
	Realm pulumi.StringPtrInput `pulumi:"realm"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the root principal password.
	RootPrincipalPasswordUri pulumi.StringInput `pulumi:"rootPrincipalPasswordUri"`
	// The lifetime of the ticket granting ticket, in hours.
	TgtLifetimeHours pulumi.IntPtrInput `pulumi:"tgtLifetimeHours"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the password to the user provided truststore. For the self-signed
	// certificate, this password is generated by Dataproc.
	TruststorePasswordUri pulumi.StringPtrInput `pulumi:"truststorePasswordUri"`
	// The Cloud Storage URI of the truststore file used for
	// SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri pulumi.StringPtrInput `pulumi:"truststoreUri"`
}
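
A hedged sketch of enabling Kerberos through the security config, in the same style as the package's other examples; the KMS key path and Cloud Storage URIs are hypothetical placeholders that must point at real resources in your project:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "kerberized", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				SecurityConfig: &dataproc.ClusterClusterConfigSecurityConfigArgs{
					KerberosConfig: &dataproc.ClusterClusterConfigSecurityConfigKerberosConfigArgs{
						EnableKerberos: pulumi.Bool(true),
						// Hypothetical KMS key and encrypted-password URIs; substitute your own.
						KmsKeyUri:                pulumi.String("projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"),
						RootPrincipalPasswordUri: pulumi.String("gs://my-secrets/root-password.encrypted"),
					},
				},
			},
		})
		return err
	})
}
```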

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ElementType

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutput

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutput() ClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput() ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

type ClusterClusterConfigSecurityConfigKerberosConfigInput

type ClusterClusterConfigSecurityConfigKerberosConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigKerberosConfigOutput() ClusterClusterConfigSecurityConfigKerberosConfigOutput
	ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput
}

ClusterClusterConfigSecurityConfigKerberosConfigInput is an input type that accepts ClusterClusterConfigSecurityConfigKerberosConfigArgs and ClusterClusterConfigSecurityConfigKerberosConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigSecurityConfigKerberosConfigInput` via:

ClusterClusterConfigSecurityConfigKerberosConfigArgs{...}

type ClusterClusterConfigSecurityConfigKerberosConfigOutput

type ClusterClusterConfigSecurityConfigKerberosConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustAdminServer

The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustKdc

The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustRealm

The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustSharedPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ElementType

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) EnableKerberos

Flag to indicate whether to Kerberize the cluster.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KdcDbKeyUri

The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeyPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeystorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeystoreUri

The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KmsKeyUri

The URI of the KMS key used to encrypt various sensitive files.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) Realm

The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) RootPrincipalPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TgtLifetimeHours

The lifetime of the ticket granting ticket, in hours.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext

func (o ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (o ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput() ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TruststorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TruststoreUri

The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

type ClusterClusterConfigSecurityConfigKerberosConfigPtrInput

type ClusterClusterConfigSecurityConfigKerberosConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput() ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput
	ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput
}

ClusterClusterConfigSecurityConfigKerberosConfigPtrInput is an input type that accepts ClusterClusterConfigSecurityConfigKerberosConfigArgs, ClusterClusterConfigSecurityConfigKerberosConfigPtr and ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigSecurityConfigKerberosConfigPtrInput` via:

        ClusterClusterConfigSecurityConfigKerberosConfigArgs{...}

or:

        nil

type ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

type ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustAdminServer

The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustKdc

The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustRealm

The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustSharedPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) Elem

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) ElementType

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) EnableKerberos

Flag to indicate whether to Kerberize the cluster.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KdcDbKeyUri

The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KeyPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KeystorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KeystoreUri

The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KmsKeyUri

The URI of the KMS key used to encrypt various sensitive files.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) Realm

The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) RootPrincipalPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) TgtLifetimeHours

The lifetime of the ticket granting ticket, in hours.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) TruststorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) TruststoreUri

The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

type ClusterClusterConfigSecurityConfigOutput

type ClusterClusterConfigSecurityConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigOutput) ElementType

func (ClusterClusterConfigSecurityConfigOutput) KerberosConfig

Kerberos Configuration

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutput

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutputWithContext

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutput

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigPtrInput

type ClusterClusterConfigSecurityConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput
	ToClusterClusterConfigSecurityConfigPtrOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigPtrOutput
}

ClusterClusterConfigSecurityConfigPtrInput is an input type that accepts ClusterClusterConfigSecurityConfigArgs, ClusterClusterConfigSecurityConfigPtr and ClusterClusterConfigSecurityConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigSecurityConfigPtrInput` via:

        ClusterClusterConfigSecurityConfigArgs{...}

or:

        nil

type ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigPtrOutput) Elem

func (ClusterClusterConfigSecurityConfigPtrOutput) ElementType

func (ClusterClusterConfigSecurityConfigPtrOutput) KerberosConfig

Kerberos Configuration

func (ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutput

func (o ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSoftwareConfig

type ClusterClusterConfigSoftwareConfig struct {
	// The Cloud Dataproc image version to use
	// for the cluster - this controls the sets of software versions
	// installed onto the nodes when you create clusters. If not specified, defaults to the
	// latest version. For a list of valid versions see
	// [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)
	ImageVersion *string `pulumi:"imageVersion"`
	// The set of optional components to activate on the cluster.
	// Accepted values are:
	// * ANACONDA
	// * DRUID
	// * HBASE
	// * HIVE_WEBHCAT
	// * JUPYTER
	// * KERBEROS
	// * PRESTO
	// * RANGER
	// * SOLR
	// * ZEPPELIN
	// * ZOOKEEPER
	OptionalComponents []string `pulumi:"optionalComponents"`
	// A list of override and additional properties (key/value pairs)
	// used to modify various aspects of the common configuration files used when creating
	// a cluster. For a list of valid properties please see
	// [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)
	OverrideProperties map[string]string      `pulumi:"overrideProperties"`
	Properties         map[string]interface{} `pulumi:"properties"`
}

type ClusterClusterConfigSoftwareConfigArgs

type ClusterClusterConfigSoftwareConfigArgs struct {
	// The Cloud Dataproc image version to use
	// for the cluster - this controls the sets of software versions
	// installed onto the nodes when you create clusters. If not specified, defaults to the
	// latest version. For a list of valid versions see
	// [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)
	ImageVersion pulumi.StringPtrInput `pulumi:"imageVersion"`
	// The set of optional components to activate on the cluster.
	// Accepted values are:
	// * ANACONDA
	// * DRUID
	// * HBASE
	// * HIVE_WEBHCAT
	// * JUPYTER
	// * KERBEROS
	// * PRESTO
	// * RANGER
	// * SOLR
	// * ZEPPELIN
	// * ZOOKEEPER
	OptionalComponents pulumi.StringArrayInput `pulumi:"optionalComponents"`
	// A list of override and additional properties (key/value pairs)
	// used to modify various aspects of the common configuration files used when creating
	// a cluster. For a list of valid properties please see
	// [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)
	OverrideProperties pulumi.StringMapInput `pulumi:"overrideProperties"`
	Properties         pulumi.MapInput       `pulumi:"properties"`
}

func (ClusterClusterConfigSoftwareConfigArgs) ElementType

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutput

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutputWithContext

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutput

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigInput

type ClusterClusterConfigSoftwareConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput
	ToClusterClusterConfigSoftwareConfigOutputWithContext(context.Context) ClusterClusterConfigSoftwareConfigOutput
}

ClusterClusterConfigSoftwareConfigInput is an input type that accepts ClusterClusterConfigSoftwareConfigArgs and ClusterClusterConfigSoftwareConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigSoftwareConfigInput` via:

ClusterClusterConfigSoftwareConfigArgs{...}
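
A hedged sketch of constructing `ClusterClusterConfigSoftwareConfigArgs` on a cluster; the image version, optional components, and property value are examples, not recommendations, and should be replaced with values valid for your project.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Pin the image version, enable optional components, and override a
		// cluster property. All values here are illustrative placeholders.
		_, err := dataproc.NewCluster(ctx, "configured", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
					ImageVersion: pulumi.String("1.5-debian10"),
					OptionalComponents: pulumi.StringArray{
						pulumi.String("JUPYTER"),
						pulumi.String("ZOOKEEPER"),
					},
					OverrideProperties: pulumi.StringMap{
						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```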

type ClusterClusterConfigSoftwareConfigOutput

type ClusterClusterConfigSoftwareConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSoftwareConfigOutput) ElementType

func (ClusterClusterConfigSoftwareConfigOutput) ImageVersion

The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)

func (ClusterClusterConfigSoftwareConfigOutput) OptionalComponents

The set of optional components to activate on the cluster. Accepted values are: ANACONDA, DRUID, HBASE, HIVE_WEBHCAT, JUPYTER, KERBEROS, PRESTO, RANGER, SOLR, ZEPPELIN, ZOOKEEPER.

func (ClusterClusterConfigSoftwareConfigOutput) OverrideProperties

A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)

func (ClusterClusterConfigSoftwareConfigOutput) Properties

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutput

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutputWithContext

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutput

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigPtrInput

type ClusterClusterConfigSoftwareConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput
	ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(context.Context) ClusterClusterConfigSoftwareConfigPtrOutput
}

ClusterClusterConfigSoftwareConfigPtrInput is an input type that accepts ClusterClusterConfigSoftwareConfigArgs, ClusterClusterConfigSoftwareConfigPtr and ClusterClusterConfigSoftwareConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigSoftwareConfigPtrInput` via:

        ClusterClusterConfigSoftwareConfigArgs{...}

or:

        nil

type ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSoftwareConfigPtrOutput) Elem

func (ClusterClusterConfigSoftwareConfigPtrOutput) ElementType

func (ClusterClusterConfigSoftwareConfigPtrOutput) ImageVersion

The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)

func (ClusterClusterConfigSoftwareConfigPtrOutput) OptionalComponents

The set of optional components to activate on the cluster. Accepted values are: ANACONDA, DRUID, HBASE, HIVE_WEBHCAT, JUPYTER, KERBEROS, PRESTO, RANGER, SOLR, ZEPPELIN, ZOOKEEPER.

func (ClusterClusterConfigSoftwareConfigPtrOutput) OverrideProperties

A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)

func (ClusterClusterConfigSoftwareConfigPtrOutput) Properties

func (ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutput

func (o ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (o ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigWorkerConfig

type ClusterClusterConfigWorkerConfig struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators []ClusterClusterConfigWorkerConfigAccelerator `pulumi:"accelerators"`
	// Disk Config
	DiskConfig *ClusterClusterConfigWorkerConfigDiskConfig `pulumi:"diskConfig"`
	// The URI for the image to use for this worker.  See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      *string  `pulumi:"imageUri"`
	InstanceNames []string `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the worker nodes. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType *string `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the worker nodes. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Specifies the number of worker nodes to create.
	// If not specified, GCP will default to a predetermined computed value (currently 2).
	NumInstances *int `pulumi:"numInstances"`
}

type ClusterClusterConfigWorkerConfigAccelerator

type ClusterClusterConfigWorkerConfigAccelerator struct {
	// The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount int `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType string `pulumi:"acceleratorType"`
}

type ClusterClusterConfigWorkerConfigAcceleratorArgs

type ClusterClusterConfigWorkerConfigAcceleratorArgs struct {
	// The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount pulumi.IntInput `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringInput `pulumi:"acceleratorType"`
}

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutput

func (i ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext

func (i ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigAcceleratorArray

type ClusterClusterConfigWorkerConfigAcceleratorArray []ClusterClusterConfigWorkerConfigAcceleratorInput

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (i ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput() ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext

func (i ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorArrayInput

type ClusterClusterConfigWorkerConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput() ClusterClusterConfigWorkerConfigAcceleratorArrayOutput
	ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput
}

ClusterClusterConfigWorkerConfigAcceleratorArrayInput is an input type that accepts ClusterClusterConfigWorkerConfigAcceleratorArray and ClusterClusterConfigWorkerConfigAcceleratorArrayOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigAcceleratorArrayInput` via:

ClusterClusterConfigWorkerConfigAcceleratorArray{ ClusterClusterConfigWorkerConfigAcceleratorArgs{...} }
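
For illustration, a sketch attaching one GPU to each worker via `ClusterClusterConfigWorkerConfigAcceleratorArray`; the accelerator type and its zone-dependent availability are assumptions you would verify for your region.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Expose one nvidia-tesla-k80 card on every worker node. The zone the
		// cluster lands in must offer this accelerator type.
		_, err := dataproc.NewCluster(ctx, "accelerated", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
					Accelerators: dataproc.ClusterClusterConfigWorkerConfigAcceleratorArray{
						&dataproc.ClusterClusterConfigWorkerConfigAcceleratorArgs{
							AcceleratorCount: pulumi.Int(1),
							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```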

type ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) Index

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext

func (o ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorInput

type ClusterClusterConfigWorkerConfigAcceleratorInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput
	ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput
}

ClusterClusterConfigWorkerConfigAcceleratorInput is an input type that accepts ClusterClusterConfigWorkerConfigAcceleratorArgs and ClusterClusterConfigWorkerConfigAcceleratorOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigAcceleratorInput` via:

ClusterClusterConfigWorkerConfigAcceleratorArgs{...}

type ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) AcceleratorCount

The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) AcceleratorType

The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutput

func (o ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext

func (o ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigArgs

type ClusterClusterConfigWorkerConfigArgs struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators ClusterClusterConfigWorkerConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Disk Config
	DiskConfig ClusterClusterConfigWorkerConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// The URI for the image to use for this worker.  See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      pulumi.StringPtrInput   `pulumi:"imageUri"`
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the worker nodes. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the worker nodes. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Specifies the number of worker nodes to create.
	// If not specified, GCP will default to a predetermined computed value (currently 2).
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
}

func (ClusterClusterConfigWorkerConfigArgs) ElementType

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutput

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutputWithContext

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutput

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfig

type ClusterClusterConfigWorkerConfigDiskConfig struct {
	// Size of the primary disk attached to each worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The number of local SSD disks that will be
	// attached to each worker node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigWorkerConfigDiskConfigArgs

type ClusterClusterConfigWorkerConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The number of local SSD disks that will be
	// attached to each worker node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutput

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigInput

type ClusterClusterConfigWorkerConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput
	ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput
}

ClusterClusterConfigWorkerConfigDiskConfigInput is an input type that accepts ClusterClusterConfigWorkerConfigDiskConfigArgs and ClusterClusterConfigWorkerConfigDiskConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigDiskConfigInput` via:

ClusterClusterConfigWorkerConfigDiskConfigArgs{...}
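
A minimal sketch of setting the worker disk shape with `ClusterClusterConfigWorkerConfigDiskConfigArgs`; the disk size, type, and local SSD count are illustrative values.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Give each worker a 100 GB pd-ssd boot disk and one local SSD.
		_, err := dataproc.NewCluster(ctx, "sized", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
					DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
						BootDiskSizeGb: pulumi.Int(100),
						BootDiskType:   pulumi.String("pd-ssd"),
						NumLocalSsds:   pulumi.Int(1),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```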

type ClusterClusterConfigWorkerConfigDiskConfigOutput

type ClusterClusterConfigWorkerConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) NumLocalSsds

The number of local SSD disks that will be attached to each worker node. Defaults to 0.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigPtrInput

type ClusterClusterConfigWorkerConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput
	ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput
}

ClusterClusterConfigWorkerConfigDiskConfigPtrInput is an input type that accepts ClusterClusterConfigWorkerConfigDiskConfigArgs, ClusterClusterConfigWorkerConfigDiskConfigPtr and ClusterClusterConfigWorkerConfigDiskConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigDiskConfigPtrInput` via:

        ClusterClusterConfigWorkerConfigDiskConfigArgs{...}

or:

        nil

type ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) NumLocalSsds

The number of local SSD disks that will be attached to each worker node. Defaults to 0.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigInput

type ClusterClusterConfigWorkerConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput
	ToClusterClusterConfigWorkerConfigOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigOutput
}

ClusterClusterConfigWorkerConfigInput is an input type that accepts ClusterClusterConfigWorkerConfigArgs and ClusterClusterConfigWorkerConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigInput` via:

ClusterClusterConfigWorkerConfigArgs{...}
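
Putting the worker fields together, a hedged sketch that sets the machine type and worker count; the disk and accelerator blocks shown earlier can be set on the same `WorkerConfig` value, and the machine type and count here are placeholders.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Request three n1-standard-4 workers. DiskConfig and Accelerators
		// (shown above) could be added alongside these fields.
		_, err := dataproc.NewCluster(ctx, "workers", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
					MachineType:  pulumi.String("n1-standard-4"),
					NumInstances: pulumi.Int(3),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```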

type ClusterClusterConfigWorkerConfigOutput

type ClusterClusterConfigWorkerConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigWorkerConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigWorkerConfigOutput) ElementType

func (ClusterClusterConfigWorkerConfigOutput) ImageUri

The URI for the image to use for this worker. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigWorkerConfigOutput) InstanceNames

func (ClusterClusterConfigWorkerConfigOutput) MachineType

The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigWorkerConfigOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigWorkerConfigOutput) NumInstances

Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2).

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutput

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutputWithContext

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigPtrInput

type ClusterClusterConfigWorkerConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput
	ToClusterClusterConfigWorkerConfigPtrOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigPtrOutput
}

ClusterClusterConfigWorkerConfigPtrInput is an input type that accepts ClusterClusterConfigWorkerConfigArgs, ClusterClusterConfigWorkerConfigPtr and ClusterClusterConfigWorkerConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigPtrInput` via:

        ClusterClusterConfigWorkerConfigArgs{...}

or:

        nil

type ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigPtrOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigWorkerConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigWorkerConfigPtrOutput) Elem

func (ClusterClusterConfigWorkerConfigPtrOutput) ElementType

func (ClusterClusterConfigWorkerConfigPtrOutput) ImageUri

The URI for the image to use for this worker. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigWorkerConfigPtrOutput) InstanceNames

func (ClusterClusterConfigWorkerConfigPtrOutput) MachineType

The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigWorkerConfigPtrOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigWorkerConfigPtrOutput) NumInstances

Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2).

func (ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterIAMBinding

type ClusterIAMBinding struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringOutput                 `pulumi:"cluster"`
	Condition ClusterIAMBindingConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag    pulumi.StringOutput      `pulumi:"etag"`
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_cluster\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewClusterIAMPolicy(ctx, "editor", &dataproc.ClusterIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			Cluster:    pulumi.String("your-dataproc-cluster"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMBinding(ctx, "editor", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMMember(ctx, "editor", &dataproc.ClusterIAMMemberArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Member:  pulumi.String("user:jane@example.com"),
			Role:    pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Cluster IAM resources can be imported using the project, region, cluster name, role and/or member.

```sh

$ pulumi import gcp:dataproc/clusterIAMBinding:ClusterIAMBinding editor "projects/{project}/regions/{region}/clusters/{cluster}"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMBinding:ClusterIAMBinding editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMBinding:ClusterIAMBinding editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor user:jane@example.com"

```

-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetClusterIAMBinding

func GetClusterIAMBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMBindingState, opts ...pulumi.ResourceOption) (*ClusterIAMBinding, error)

GetClusterIAMBinding gets an existing ClusterIAMBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
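
As an illustrative sketch, the lookup below reads an existing binding by ID and exports its etag; the ID string is a placeholder in the same format used in the Import section above, and passing `nil` state relies on the provider to populate it.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an existing binding by its ID; the ID below is a placeholder.
		binding, err := dataproc.GetClusterIAMBinding(ctx, "existing-editor",
			pulumi.ID("projects/your-project/regions/your-region/clusters/your-dataproc-cluster roles/editor"), nil)
		if err != nil {
			return err
		}
		// Export the (computed) etag of the looked-up binding.
		ctx.Export("bindingEtag", binding.Etag)
		return nil
	})
}

```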

func NewClusterIAMBinding

func NewClusterIAMBinding(ctx *pulumi.Context,
	name string, args *ClusterIAMBindingArgs, opts ...pulumi.ResourceOption) (*ClusterIAMBinding, error)

NewClusterIAMBinding registers a new resource with the given unique name, arguments, and options.

func (*ClusterIAMBinding) ElementType added in v4.4.0

func (*ClusterIAMBinding) ElementType() reflect.Type

func (*ClusterIAMBinding) ToClusterIAMBindingOutput added in v4.4.0

func (i *ClusterIAMBinding) ToClusterIAMBindingOutput() ClusterIAMBindingOutput

func (*ClusterIAMBinding) ToClusterIAMBindingOutputWithContext added in v4.4.0

func (i *ClusterIAMBinding) ToClusterIAMBindingOutputWithContext(ctx context.Context) ClusterIAMBindingOutput

func (*ClusterIAMBinding) ToClusterIAMBindingPtrOutput added in v4.11.1

func (i *ClusterIAMBinding) ToClusterIAMBindingPtrOutput() ClusterIAMBindingPtrOutput

func (*ClusterIAMBinding) ToClusterIAMBindingPtrOutputWithContext added in v4.11.1

func (i *ClusterIAMBinding) ToClusterIAMBindingPtrOutputWithContext(ctx context.Context) ClusterIAMBindingPtrOutput

type ClusterIAMBindingArgs

type ClusterIAMBindingArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringInput
	Condition ClusterIAMBindingConditionPtrInput
	Members   pulumi.StringArrayInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a ClusterIAMBinding resource.

func (ClusterIAMBindingArgs) ElementType

func (ClusterIAMBindingArgs) ElementType() reflect.Type

type ClusterIAMBindingArray added in v4.11.1

type ClusterIAMBindingArray []ClusterIAMBindingInput

func (ClusterIAMBindingArray) ElementType added in v4.11.1

func (ClusterIAMBindingArray) ElementType() reflect.Type

func (ClusterIAMBindingArray) ToClusterIAMBindingArrayOutput added in v4.11.1

func (i ClusterIAMBindingArray) ToClusterIAMBindingArrayOutput() ClusterIAMBindingArrayOutput

func (ClusterIAMBindingArray) ToClusterIAMBindingArrayOutputWithContext added in v4.11.1

func (i ClusterIAMBindingArray) ToClusterIAMBindingArrayOutputWithContext(ctx context.Context) ClusterIAMBindingArrayOutput

type ClusterIAMBindingArrayInput added in v4.11.1

type ClusterIAMBindingArrayInput interface {
	pulumi.Input

	ToClusterIAMBindingArrayOutput() ClusterIAMBindingArrayOutput
	ToClusterIAMBindingArrayOutputWithContext(context.Context) ClusterIAMBindingArrayOutput
}

ClusterIAMBindingArrayInput is an input type that accepts ClusterIAMBindingArray and ClusterIAMBindingArrayOutput values. You can construct a concrete instance of `ClusterIAMBindingArrayInput` via:

ClusterIAMBindingArray{ ClusterIAMBindingArgs{...} }

type ClusterIAMBindingArrayOutput added in v4.11.1

type ClusterIAMBindingArrayOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingArrayOutput) ElementType added in v4.11.1

func (ClusterIAMBindingArrayOutput) Index added in v4.11.1

func (ClusterIAMBindingArrayOutput) ToClusterIAMBindingArrayOutput added in v4.11.1

func (o ClusterIAMBindingArrayOutput) ToClusterIAMBindingArrayOutput() ClusterIAMBindingArrayOutput

func (ClusterIAMBindingArrayOutput) ToClusterIAMBindingArrayOutputWithContext added in v4.11.1

func (o ClusterIAMBindingArrayOutput) ToClusterIAMBindingArrayOutputWithContext(ctx context.Context) ClusterIAMBindingArrayOutput

type ClusterIAMBindingCondition

type ClusterIAMBindingCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type ClusterIAMBindingConditionArgs

type ClusterIAMBindingConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (ClusterIAMBindingConditionArgs) ElementType

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutput

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutputWithContext

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutputWithContext(ctx context.Context) ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutput

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutputWithContext

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionInput

type ClusterIAMBindingConditionInput interface {
	pulumi.Input

	ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput
	ToClusterIAMBindingConditionOutputWithContext(context.Context) ClusterIAMBindingConditionOutput
}

ClusterIAMBindingConditionInput is an input type that accepts ClusterIAMBindingConditionArgs and ClusterIAMBindingConditionOutput values. You can construct a concrete instance of `ClusterIAMBindingConditionInput` via:

ClusterIAMBindingConditionArgs{...}
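
A hedged sketch of attaching an IAM condition to a binding; the title, description, and CEL expression are illustrative placeholders, and the role you grant must support conditional bindings.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant roles/editor on the cluster only until the expression's cutoff.
		_, err := dataproc.NewClusterIAMBinding(ctx, "conditionalEditor", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Role:    pulumi.String("roles/editor"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Condition: &dataproc.ClusterIAMBindingConditionArgs{
				Title:       pulumi.String("expires-after-2025"),
				Description: pulumi.String("Expires at the end of 2025"),
				Expression:  pulumi.String("request.time < timestamp(\"2026-01-01T00:00:00Z\")"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```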

type ClusterIAMBindingConditionOutput

type ClusterIAMBindingConditionOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingConditionOutput) Description

func (ClusterIAMBindingConditionOutput) ElementType

func (ClusterIAMBindingConditionOutput) Expression

func (ClusterIAMBindingConditionOutput) Title

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutput

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutputWithContext

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutputWithContext(ctx context.Context) ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutput

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutputWithContext

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionPtrInput

type ClusterIAMBindingConditionPtrInput interface {
	pulumi.Input

	ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput
	ToClusterIAMBindingConditionPtrOutputWithContext(context.Context) ClusterIAMBindingConditionPtrOutput
}

ClusterIAMBindingConditionPtrInput is an input type that accepts ClusterIAMBindingConditionArgs, ClusterIAMBindingConditionPtr and ClusterIAMBindingConditionPtrOutput values. You can construct a concrete instance of `ClusterIAMBindingConditionPtrInput` via:

        ClusterIAMBindingConditionArgs{...}

or:

        nil

type ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingConditionPtrOutput) Description

func (ClusterIAMBindingConditionPtrOutput) Elem

func (ClusterIAMBindingConditionPtrOutput) ElementType

func (ClusterIAMBindingConditionPtrOutput) Expression

func (ClusterIAMBindingConditionPtrOutput) Title

func (ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutput

func (o ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutputWithContext

func (o ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingInput added in v4.4.0

type ClusterIAMBindingInput interface {
	pulumi.Input

	ToClusterIAMBindingOutput() ClusterIAMBindingOutput
	ToClusterIAMBindingOutputWithContext(ctx context.Context) ClusterIAMBindingOutput
}

type ClusterIAMBindingMap added in v4.11.1

type ClusterIAMBindingMap map[string]ClusterIAMBindingInput

func (ClusterIAMBindingMap) ElementType added in v4.11.1

func (ClusterIAMBindingMap) ElementType() reflect.Type

func (ClusterIAMBindingMap) ToClusterIAMBindingMapOutput added in v4.11.1

func (i ClusterIAMBindingMap) ToClusterIAMBindingMapOutput() ClusterIAMBindingMapOutput

func (ClusterIAMBindingMap) ToClusterIAMBindingMapOutputWithContext added in v4.11.1

func (i ClusterIAMBindingMap) ToClusterIAMBindingMapOutputWithContext(ctx context.Context) ClusterIAMBindingMapOutput

type ClusterIAMBindingMapInput added in v4.11.1

type ClusterIAMBindingMapInput interface {
	pulumi.Input

	ToClusterIAMBindingMapOutput() ClusterIAMBindingMapOutput
	ToClusterIAMBindingMapOutputWithContext(context.Context) ClusterIAMBindingMapOutput
}

ClusterIAMBindingMapInput is an input type that accepts ClusterIAMBindingMap and ClusterIAMBindingMapOutput values. You can construct a concrete instance of `ClusterIAMBindingMapInput` via:

ClusterIAMBindingMap{ "key": ClusterIAMBindingArgs{...} }

type ClusterIAMBindingMapOutput added in v4.11.1

type ClusterIAMBindingMapOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingMapOutput) ElementType added in v4.11.1

func (ClusterIAMBindingMapOutput) ElementType() reflect.Type

func (ClusterIAMBindingMapOutput) MapIndex added in v4.11.1

func (ClusterIAMBindingMapOutput) ToClusterIAMBindingMapOutput added in v4.11.1

func (o ClusterIAMBindingMapOutput) ToClusterIAMBindingMapOutput() ClusterIAMBindingMapOutput

func (ClusterIAMBindingMapOutput) ToClusterIAMBindingMapOutputWithContext added in v4.11.1

func (o ClusterIAMBindingMapOutput) ToClusterIAMBindingMapOutputWithContext(ctx context.Context) ClusterIAMBindingMapOutput

type ClusterIAMBindingOutput added in v4.4.0

type ClusterIAMBindingOutput struct {
	*pulumi.OutputState
}

func (ClusterIAMBindingOutput) ElementType added in v4.4.0

func (ClusterIAMBindingOutput) ElementType() reflect.Type

func (ClusterIAMBindingOutput) ToClusterIAMBindingOutput added in v4.4.0

func (o ClusterIAMBindingOutput) ToClusterIAMBindingOutput() ClusterIAMBindingOutput

func (ClusterIAMBindingOutput) ToClusterIAMBindingOutputWithContext added in v4.4.0

func (o ClusterIAMBindingOutput) ToClusterIAMBindingOutputWithContext(ctx context.Context) ClusterIAMBindingOutput

func (ClusterIAMBindingOutput) ToClusterIAMBindingPtrOutput added in v4.11.1

func (o ClusterIAMBindingOutput) ToClusterIAMBindingPtrOutput() ClusterIAMBindingPtrOutput

func (ClusterIAMBindingOutput) ToClusterIAMBindingPtrOutputWithContext added in v4.11.1

func (o ClusterIAMBindingOutput) ToClusterIAMBindingPtrOutputWithContext(ctx context.Context) ClusterIAMBindingPtrOutput

type ClusterIAMBindingPtrInput added in v4.11.1

type ClusterIAMBindingPtrInput interface {
	pulumi.Input

	ToClusterIAMBindingPtrOutput() ClusterIAMBindingPtrOutput
	ToClusterIAMBindingPtrOutputWithContext(ctx context.Context) ClusterIAMBindingPtrOutput
}

type ClusterIAMBindingPtrOutput added in v4.11.1

type ClusterIAMBindingPtrOutput struct {
	*pulumi.OutputState
}

func (ClusterIAMBindingPtrOutput) ElementType added in v4.11.1

func (ClusterIAMBindingPtrOutput) ElementType() reflect.Type

func (ClusterIAMBindingPtrOutput) ToClusterIAMBindingPtrOutput added in v4.11.1

func (o ClusterIAMBindingPtrOutput) ToClusterIAMBindingPtrOutput() ClusterIAMBindingPtrOutput

func (ClusterIAMBindingPtrOutput) ToClusterIAMBindingPtrOutputWithContext added in v4.11.1

func (o ClusterIAMBindingPtrOutput) ToClusterIAMBindingPtrOutputWithContext(ctx context.Context) ClusterIAMBindingPtrOutput

type ClusterIAMBindingState

type ClusterIAMBindingState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringPtrInput
	Condition ClusterIAMBindingConditionPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag    pulumi.StringPtrInput
	Members pulumi.StringArrayInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (ClusterIAMBindingState) ElementType

func (ClusterIAMBindingState) ElementType() reflect.Type

type ClusterIAMMember

type ClusterIAMMember struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringOutput                `pulumi:"cluster"`
	Condition ClusterIAMMemberConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag   pulumi.StringOutput `pulumi:"etag"`
	Member pulumi.StringOutput `pulumi:"member"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_cluster\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewClusterIAMPolicy(ctx, "editor", &dataproc.ClusterIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			Cluster:    pulumi.String("your-dataproc-cluster"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMBinding(ctx, "editor", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMMember(ctx, "editor", &dataproc.ClusterIAMMemberArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Member:  pulumi.String("user:jane@example.com"),
			Role:    pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Cluster IAM resources can be imported using the project, region, cluster name, role and/or member.

```sh

$ pulumi import gcp:dataproc/clusterIAMMember:ClusterIAMMember editor "projects/{project}/regions/{region}/clusters/{cluster}"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMMember:ClusterIAMMember editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMMember:ClusterIAMMember editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor user:jane@example.com"

```

-> **Custom Roles:** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetClusterIAMMember

func GetClusterIAMMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMMemberState, opts ...pulumi.ResourceOption) (*ClusterIAMMember, error)

GetClusterIAMMember gets an existing ClusterIAMMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
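
As a minimal sketch, an existing grant can be read back with `GetClusterIAMMember`; the ID mirrors the `pulumi import` formats shown above, and the project, region, and cluster values are placeholders.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up the state of an already-created ClusterIAMMember by its ID.
		// Passing nil for the state hint is allowed ("nil if not required").
		member, err := dataproc.GetClusterIAMMember(ctx, "existing",
			pulumi.ID("projects/your-project/regions/your-region/clusters/your-dataproc-cluster roles/editor user:jane@example.com"), nil)
		if err != nil {
			return err
		}
		ctx.Export("memberEtag", member.Etag)
		return nil
	})
}

```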

func NewClusterIAMMember

func NewClusterIAMMember(ctx *pulumi.Context,
	name string, args *ClusterIAMMemberArgs, opts ...pulumi.ResourceOption) (*ClusterIAMMember, error)

NewClusterIAMMember registers a new resource with the given unique name, arguments, and options.

func (*ClusterIAMMember) ElementType added in v4.4.0

func (*ClusterIAMMember) ElementType() reflect.Type

func (*ClusterIAMMember) ToClusterIAMMemberOutput added in v4.4.0

func (i *ClusterIAMMember) ToClusterIAMMemberOutput() ClusterIAMMemberOutput

func (*ClusterIAMMember) ToClusterIAMMemberOutputWithContext added in v4.4.0

func (i *ClusterIAMMember) ToClusterIAMMemberOutputWithContext(ctx context.Context) ClusterIAMMemberOutput

func (*ClusterIAMMember) ToClusterIAMMemberPtrOutput added in v4.11.1

func (i *ClusterIAMMember) ToClusterIAMMemberPtrOutput() ClusterIAMMemberPtrOutput

func (*ClusterIAMMember) ToClusterIAMMemberPtrOutputWithContext added in v4.11.1

func (i *ClusterIAMMember) ToClusterIAMMemberPtrOutputWithContext(ctx context.Context) ClusterIAMMemberPtrOutput

type ClusterIAMMemberArgs

type ClusterIAMMemberArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringInput
	Condition ClusterIAMMemberConditionPtrInput
	Member    pulumi.StringInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a ClusterIAMMember resource.

func (ClusterIAMMemberArgs) ElementType

func (ClusterIAMMemberArgs) ElementType() reflect.Type

type ClusterIAMMemberArray added in v4.11.1

type ClusterIAMMemberArray []ClusterIAMMemberInput

func (ClusterIAMMemberArray) ElementType added in v4.11.1

func (ClusterIAMMemberArray) ElementType() reflect.Type

func (ClusterIAMMemberArray) ToClusterIAMMemberArrayOutput added in v4.11.1

func (i ClusterIAMMemberArray) ToClusterIAMMemberArrayOutput() ClusterIAMMemberArrayOutput

func (ClusterIAMMemberArray) ToClusterIAMMemberArrayOutputWithContext added in v4.11.1

func (i ClusterIAMMemberArray) ToClusterIAMMemberArrayOutputWithContext(ctx context.Context) ClusterIAMMemberArrayOutput

type ClusterIAMMemberArrayInput added in v4.11.1

type ClusterIAMMemberArrayInput interface {
	pulumi.Input

	ToClusterIAMMemberArrayOutput() ClusterIAMMemberArrayOutput
	ToClusterIAMMemberArrayOutputWithContext(context.Context) ClusterIAMMemberArrayOutput
}

ClusterIAMMemberArrayInput is an input type that accepts ClusterIAMMemberArray and ClusterIAMMemberArrayOutput values. You can construct a concrete instance of `ClusterIAMMemberArrayInput` via:

ClusterIAMMemberArray{ ClusterIAMMemberArgs{...} }

type ClusterIAMMemberArrayOutput added in v4.11.1

type ClusterIAMMemberArrayOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberArrayOutput) ElementType added in v4.11.1

func (ClusterIAMMemberArrayOutput) Index added in v4.11.1

func (ClusterIAMMemberArrayOutput) ToClusterIAMMemberArrayOutput added in v4.11.1

func (o ClusterIAMMemberArrayOutput) ToClusterIAMMemberArrayOutput() ClusterIAMMemberArrayOutput

func (ClusterIAMMemberArrayOutput) ToClusterIAMMemberArrayOutputWithContext added in v4.11.1

func (o ClusterIAMMemberArrayOutput) ToClusterIAMMemberArrayOutputWithContext(ctx context.Context) ClusterIAMMemberArrayOutput

type ClusterIAMMemberCondition

type ClusterIAMMemberCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type ClusterIAMMemberConditionArgs

type ClusterIAMMemberConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (ClusterIAMMemberConditionArgs) ElementType

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutput

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutputWithContext

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutputWithContext(ctx context.Context) ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutput

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutputWithContext

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionInput

type ClusterIAMMemberConditionInput interface {
	pulumi.Input

	ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput
	ToClusterIAMMemberConditionOutputWithContext(context.Context) ClusterIAMMemberConditionOutput
}

ClusterIAMMemberConditionInput is an input type that accepts ClusterIAMMemberConditionArgs and ClusterIAMMemberConditionOutput values. You can construct a concrete instance of `ClusterIAMMemberConditionInput` via:

ClusterIAMMemberConditionArgs{...}

type ClusterIAMMemberConditionOutput

type ClusterIAMMemberConditionOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberConditionOutput) Description

func (ClusterIAMMemberConditionOutput) ElementType

func (ClusterIAMMemberConditionOutput) Expression

func (ClusterIAMMemberConditionOutput) Title

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutput

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutputWithContext

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutputWithContext(ctx context.Context) ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutput

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutputWithContext

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionPtrInput

type ClusterIAMMemberConditionPtrInput interface {
	pulumi.Input

	ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput
	ToClusterIAMMemberConditionPtrOutputWithContext(context.Context) ClusterIAMMemberConditionPtrOutput
}

ClusterIAMMemberConditionPtrInput is an input type that accepts ClusterIAMMemberConditionArgs, ClusterIAMMemberConditionPtr and ClusterIAMMemberConditionPtrOutput values. You can construct a concrete instance of `ClusterIAMMemberConditionPtrInput` via:

        ClusterIAMMemberConditionArgs{...}

or:

        nil

type ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberConditionPtrOutput) Description

func (ClusterIAMMemberConditionPtrOutput) Elem

func (ClusterIAMMemberConditionPtrOutput) ElementType

func (ClusterIAMMemberConditionPtrOutput) Expression

func (ClusterIAMMemberConditionPtrOutput) Title

func (ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutput

func (o ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutputWithContext

func (o ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberInput added in v4.4.0

type ClusterIAMMemberInput interface {
	pulumi.Input

	ToClusterIAMMemberOutput() ClusterIAMMemberOutput
	ToClusterIAMMemberOutputWithContext(ctx context.Context) ClusterIAMMemberOutput
}

type ClusterIAMMemberMap added in v4.11.1

type ClusterIAMMemberMap map[string]ClusterIAMMemberInput

func (ClusterIAMMemberMap) ElementType added in v4.11.1

func (ClusterIAMMemberMap) ElementType() reflect.Type

func (ClusterIAMMemberMap) ToClusterIAMMemberMapOutput added in v4.11.1

func (i ClusterIAMMemberMap) ToClusterIAMMemberMapOutput() ClusterIAMMemberMapOutput

func (ClusterIAMMemberMap) ToClusterIAMMemberMapOutputWithContext added in v4.11.1

func (i ClusterIAMMemberMap) ToClusterIAMMemberMapOutputWithContext(ctx context.Context) ClusterIAMMemberMapOutput

type ClusterIAMMemberMapInput added in v4.11.1

type ClusterIAMMemberMapInput interface {
	pulumi.Input

	ToClusterIAMMemberMapOutput() ClusterIAMMemberMapOutput
	ToClusterIAMMemberMapOutputWithContext(context.Context) ClusterIAMMemberMapOutput
}

ClusterIAMMemberMapInput is an input type that accepts ClusterIAMMemberMap and ClusterIAMMemberMapOutput values. You can construct a concrete instance of `ClusterIAMMemberMapInput` via:

ClusterIAMMemberMap{ "key": ClusterIAMMemberArgs{...} }

type ClusterIAMMemberMapOutput added in v4.11.1

type ClusterIAMMemberMapOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberMapOutput) ElementType added in v4.11.1

func (ClusterIAMMemberMapOutput) ElementType() reflect.Type

func (ClusterIAMMemberMapOutput) MapIndex added in v4.11.1

func (ClusterIAMMemberMapOutput) ToClusterIAMMemberMapOutput added in v4.11.1

func (o ClusterIAMMemberMapOutput) ToClusterIAMMemberMapOutput() ClusterIAMMemberMapOutput

func (ClusterIAMMemberMapOutput) ToClusterIAMMemberMapOutputWithContext added in v4.11.1

func (o ClusterIAMMemberMapOutput) ToClusterIAMMemberMapOutputWithContext(ctx context.Context) ClusterIAMMemberMapOutput

type ClusterIAMMemberOutput added in v4.4.0

type ClusterIAMMemberOutput struct {
	*pulumi.OutputState
}

func (ClusterIAMMemberOutput) ElementType added in v4.4.0

func (ClusterIAMMemberOutput) ElementType() reflect.Type

func (ClusterIAMMemberOutput) ToClusterIAMMemberOutput added in v4.4.0

func (o ClusterIAMMemberOutput) ToClusterIAMMemberOutput() ClusterIAMMemberOutput

func (ClusterIAMMemberOutput) ToClusterIAMMemberOutputWithContext added in v4.4.0

func (o ClusterIAMMemberOutput) ToClusterIAMMemberOutputWithContext(ctx context.Context) ClusterIAMMemberOutput

func (ClusterIAMMemberOutput) ToClusterIAMMemberPtrOutput added in v4.11.1

func (o ClusterIAMMemberOutput) ToClusterIAMMemberPtrOutput() ClusterIAMMemberPtrOutput

func (ClusterIAMMemberOutput) ToClusterIAMMemberPtrOutputWithContext added in v4.11.1

func (o ClusterIAMMemberOutput) ToClusterIAMMemberPtrOutputWithContext(ctx context.Context) ClusterIAMMemberPtrOutput

type ClusterIAMMemberPtrInput added in v4.11.1

type ClusterIAMMemberPtrInput interface {
	pulumi.Input

	ToClusterIAMMemberPtrOutput() ClusterIAMMemberPtrOutput
	ToClusterIAMMemberPtrOutputWithContext(ctx context.Context) ClusterIAMMemberPtrOutput
}

type ClusterIAMMemberPtrOutput added in v4.11.1

type ClusterIAMMemberPtrOutput struct {
	*pulumi.OutputState
}

func (ClusterIAMMemberPtrOutput) ElementType added in v4.11.1

func (ClusterIAMMemberPtrOutput) ElementType() reflect.Type

func (ClusterIAMMemberPtrOutput) ToClusterIAMMemberPtrOutput added in v4.11.1

func (o ClusterIAMMemberPtrOutput) ToClusterIAMMemberPtrOutput() ClusterIAMMemberPtrOutput

func (ClusterIAMMemberPtrOutput) ToClusterIAMMemberPtrOutputWithContext added in v4.11.1

func (o ClusterIAMMemberPtrOutput) ToClusterIAMMemberPtrOutputWithContext(ctx context.Context) ClusterIAMMemberPtrOutput

type ClusterIAMMemberState

type ClusterIAMMemberState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringPtrInput
	Condition ClusterIAMMemberConditionPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag   pulumi.StringPtrInput
	Member pulumi.StringPtrInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (ClusterIAMMemberState) ElementType

func (ClusterIAMMemberState) ElementType() reflect.Type

type ClusterIAMPolicy

type ClusterIAMPolicy struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringOutput `pulumi:"cluster"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringOutput `pulumi:"policyData"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_cluster\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewClusterIAMPolicy(ctx, "editor", &dataproc.ClusterIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			Cluster:    pulumi.String("your-dataproc-cluster"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMBinding(ctx, "editor", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMMember(ctx, "editor", &dataproc.ClusterIAMMemberArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Member:  pulumi.String("user:jane@example.com"),
			Role:    pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Cluster IAM resources can be imported using the project, region, cluster name, role and/or member.

```sh

$ pulumi import gcp:dataproc/clusterIAMPolicy:ClusterIAMPolicy editor "projects/{project}/regions/{region}/clusters/{cluster}"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMPolicy:ClusterIAMPolicy editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMPolicy:ClusterIAMPolicy editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor user:jane@example.com"

```

-> **Custom Roles:** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetClusterIAMPolicy

func GetClusterIAMPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMPolicyState, opts ...pulumi.ResourceOption) (*ClusterIAMPolicy, error)

GetClusterIAMPolicy gets an existing ClusterIAMPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewClusterIAMPolicy

func NewClusterIAMPolicy(ctx *pulumi.Context,
	name string, args *ClusterIAMPolicyArgs, opts ...pulumi.ResourceOption) (*ClusterIAMPolicy, error)

NewClusterIAMPolicy registers a new resource with the given unique name, arguments, and options.

func (*ClusterIAMPolicy) ElementType added in v4.4.0

func (*ClusterIAMPolicy) ElementType() reflect.Type

func (*ClusterIAMPolicy) ToClusterIAMPolicyOutput added in v4.4.0

func (i *ClusterIAMPolicy) ToClusterIAMPolicyOutput() ClusterIAMPolicyOutput

func (*ClusterIAMPolicy) ToClusterIAMPolicyOutputWithContext added in v4.4.0

func (i *ClusterIAMPolicy) ToClusterIAMPolicyOutputWithContext(ctx context.Context) ClusterIAMPolicyOutput

func (*ClusterIAMPolicy) ToClusterIAMPolicyPtrOutput added in v4.11.1

func (i *ClusterIAMPolicy) ToClusterIAMPolicyPtrOutput() ClusterIAMPolicyPtrOutput

func (*ClusterIAMPolicy) ToClusterIAMPolicyPtrOutputWithContext added in v4.11.1

func (i *ClusterIAMPolicy) ToClusterIAMPolicyPtrOutputWithContext(ctx context.Context) ClusterIAMPolicyPtrOutput

type ClusterIAMPolicyArgs

type ClusterIAMPolicyArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringInput
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a ClusterIAMPolicy resource.

func (ClusterIAMPolicyArgs) ElementType

func (ClusterIAMPolicyArgs) ElementType() reflect.Type

type ClusterIAMPolicyArray added in v4.11.1

type ClusterIAMPolicyArray []ClusterIAMPolicyInput

func (ClusterIAMPolicyArray) ElementType added in v4.11.1

func (ClusterIAMPolicyArray) ElementType() reflect.Type

func (ClusterIAMPolicyArray) ToClusterIAMPolicyArrayOutput added in v4.11.1

func (i ClusterIAMPolicyArray) ToClusterIAMPolicyArrayOutput() ClusterIAMPolicyArrayOutput

func (ClusterIAMPolicyArray) ToClusterIAMPolicyArrayOutputWithContext added in v4.11.1

func (i ClusterIAMPolicyArray) ToClusterIAMPolicyArrayOutputWithContext(ctx context.Context) ClusterIAMPolicyArrayOutput

type ClusterIAMPolicyArrayInput added in v4.11.1

type ClusterIAMPolicyArrayInput interface {
	pulumi.Input

	ToClusterIAMPolicyArrayOutput() ClusterIAMPolicyArrayOutput
	ToClusterIAMPolicyArrayOutputWithContext(context.Context) ClusterIAMPolicyArrayOutput
}

ClusterIAMPolicyArrayInput is an input type that accepts ClusterIAMPolicyArray and ClusterIAMPolicyArrayOutput values. You can construct a concrete instance of `ClusterIAMPolicyArrayInput` via:

ClusterIAMPolicyArray{ ClusterIAMPolicyArgs{...} }

type ClusterIAMPolicyArrayOutput added in v4.11.1

type ClusterIAMPolicyArrayOutput struct{ *pulumi.OutputState }

func (ClusterIAMPolicyArrayOutput) ElementType added in v4.11.1

func (ClusterIAMPolicyArrayOutput) Index added in v4.11.1

func (ClusterIAMPolicyArrayOutput) ToClusterIAMPolicyArrayOutput added in v4.11.1

func (o ClusterIAMPolicyArrayOutput) ToClusterIAMPolicyArrayOutput() ClusterIAMPolicyArrayOutput

func (ClusterIAMPolicyArrayOutput) ToClusterIAMPolicyArrayOutputWithContext added in v4.11.1

func (o ClusterIAMPolicyArrayOutput) ToClusterIAMPolicyArrayOutputWithContext(ctx context.Context) ClusterIAMPolicyArrayOutput

type ClusterIAMPolicyInput added in v4.4.0

type ClusterIAMPolicyInput interface {
	pulumi.Input

	ToClusterIAMPolicyOutput() ClusterIAMPolicyOutput
	ToClusterIAMPolicyOutputWithContext(ctx context.Context) ClusterIAMPolicyOutput
}

type ClusterIAMPolicyMap added in v4.11.1

type ClusterIAMPolicyMap map[string]ClusterIAMPolicyInput

func (ClusterIAMPolicyMap) ElementType added in v4.11.1

func (ClusterIAMPolicyMap) ElementType() reflect.Type

func (ClusterIAMPolicyMap) ToClusterIAMPolicyMapOutput added in v4.11.1

func (i ClusterIAMPolicyMap) ToClusterIAMPolicyMapOutput() ClusterIAMPolicyMapOutput

func (ClusterIAMPolicyMap) ToClusterIAMPolicyMapOutputWithContext added in v4.11.1

func (i ClusterIAMPolicyMap) ToClusterIAMPolicyMapOutputWithContext(ctx context.Context) ClusterIAMPolicyMapOutput

type ClusterIAMPolicyMapInput added in v4.11.1

type ClusterIAMPolicyMapInput interface {
	pulumi.Input

	ToClusterIAMPolicyMapOutput() ClusterIAMPolicyMapOutput
	ToClusterIAMPolicyMapOutputWithContext(context.Context) ClusterIAMPolicyMapOutput
}

ClusterIAMPolicyMapInput is an input type that accepts ClusterIAMPolicyMap and ClusterIAMPolicyMapOutput values. You can construct a concrete instance of `ClusterIAMPolicyMapInput` via:

ClusterIAMPolicyMap{ "key": ClusterIAMPolicyArgs{...} }

type ClusterIAMPolicyMapOutput added in v4.11.1

type ClusterIAMPolicyMapOutput struct{ *pulumi.OutputState }

func (ClusterIAMPolicyMapOutput) ElementType added in v4.11.1

func (ClusterIAMPolicyMapOutput) ElementType() reflect.Type

func (ClusterIAMPolicyMapOutput) MapIndex added in v4.11.1

func (ClusterIAMPolicyMapOutput) ToClusterIAMPolicyMapOutput added in v4.11.1

func (o ClusterIAMPolicyMapOutput) ToClusterIAMPolicyMapOutput() ClusterIAMPolicyMapOutput

func (ClusterIAMPolicyMapOutput) ToClusterIAMPolicyMapOutputWithContext added in v4.11.1

func (o ClusterIAMPolicyMapOutput) ToClusterIAMPolicyMapOutputWithContext(ctx context.Context) ClusterIAMPolicyMapOutput

type ClusterIAMPolicyOutput added in v4.4.0

type ClusterIAMPolicyOutput struct {
	*pulumi.OutputState
}

func (ClusterIAMPolicyOutput) ElementType added in v4.4.0

func (ClusterIAMPolicyOutput) ElementType() reflect.Type

func (ClusterIAMPolicyOutput) ToClusterIAMPolicyOutput added in v4.4.0

func (o ClusterIAMPolicyOutput) ToClusterIAMPolicyOutput() ClusterIAMPolicyOutput

func (ClusterIAMPolicyOutput) ToClusterIAMPolicyOutputWithContext added in v4.4.0

func (o ClusterIAMPolicyOutput) ToClusterIAMPolicyOutputWithContext(ctx context.Context) ClusterIAMPolicyOutput

func (ClusterIAMPolicyOutput) ToClusterIAMPolicyPtrOutput added in v4.11.1

func (o ClusterIAMPolicyOutput) ToClusterIAMPolicyPtrOutput() ClusterIAMPolicyPtrOutput

func (ClusterIAMPolicyOutput) ToClusterIAMPolicyPtrOutputWithContext added in v4.11.1

func (o ClusterIAMPolicyOutput) ToClusterIAMPolicyPtrOutputWithContext(ctx context.Context) ClusterIAMPolicyPtrOutput

type ClusterIAMPolicyPtrInput added in v4.11.1

type ClusterIAMPolicyPtrInput interface {
	pulumi.Input

	ToClusterIAMPolicyPtrOutput() ClusterIAMPolicyPtrOutput
	ToClusterIAMPolicyPtrOutputWithContext(ctx context.Context) ClusterIAMPolicyPtrOutput
}

type ClusterIAMPolicyPtrOutput added in v4.11.1

type ClusterIAMPolicyPtrOutput struct {
	*pulumi.OutputState
}

func (ClusterIAMPolicyPtrOutput) ElementType added in v4.11.1

func (ClusterIAMPolicyPtrOutput) ElementType() reflect.Type

func (ClusterIAMPolicyPtrOutput) ToClusterIAMPolicyPtrOutput added in v4.11.1

func (o ClusterIAMPolicyPtrOutput) ToClusterIAMPolicyPtrOutput() ClusterIAMPolicyPtrOutput

func (ClusterIAMPolicyPtrOutput) ToClusterIAMPolicyPtrOutputWithContext added in v4.11.1

func (o ClusterIAMPolicyPtrOutput) ToClusterIAMPolicyPtrOutputWithContext(ctx context.Context) ClusterIAMPolicyPtrOutput

type ClusterIAMPolicyState

type ClusterIAMPolicyState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag pulumi.StringPtrInput
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringPtrInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

func (ClusterIAMPolicyState) ElementType

func (ClusterIAMPolicyState) ElementType() reflect.Type

type ClusterInput added in v4.4.0

type ClusterInput interface {
	pulumi.Input

	ToClusterOutput() ClusterOutput
	ToClusterOutputWithContext(ctx context.Context) ClusterOutput
}

type ClusterMap added in v4.11.1

type ClusterMap map[string]ClusterInput

func (ClusterMap) ElementType added in v4.11.1

func (ClusterMap) ElementType() reflect.Type

func (ClusterMap) ToClusterMapOutput added in v4.11.1

func (i ClusterMap) ToClusterMapOutput() ClusterMapOutput

func (ClusterMap) ToClusterMapOutputWithContext added in v4.11.1

func (i ClusterMap) ToClusterMapOutputWithContext(ctx context.Context) ClusterMapOutput

type ClusterMapInput added in v4.11.1

type ClusterMapInput interface {
	pulumi.Input

	ToClusterMapOutput() ClusterMapOutput
	ToClusterMapOutputWithContext(context.Context) ClusterMapOutput
}

ClusterMapInput is an input type that accepts ClusterMap and ClusterMapOutput values. You can construct a concrete instance of `ClusterMapInput` via:

ClusterMap{ "key": ClusterArgs{...} }

type ClusterMapOutput added in v4.11.1

type ClusterMapOutput struct{ *pulumi.OutputState }

func (ClusterMapOutput) ElementType added in v4.11.1

func (ClusterMapOutput) ElementType() reflect.Type

func (ClusterMapOutput) MapIndex added in v4.11.1

func (ClusterMapOutput) ToClusterMapOutput added in v4.11.1

func (o ClusterMapOutput) ToClusterMapOutput() ClusterMapOutput

func (ClusterMapOutput) ToClusterMapOutputWithContext added in v4.11.1

func (o ClusterMapOutput) ToClusterMapOutputWithContext(ctx context.Context) ClusterMapOutput

type ClusterOutput added in v4.4.0

type ClusterOutput struct {
	*pulumi.OutputState
}

func (ClusterOutput) ElementType added in v4.4.0

func (ClusterOutput) ElementType() reflect.Type

func (ClusterOutput) ToClusterOutput added in v4.4.0

func (o ClusterOutput) ToClusterOutput() ClusterOutput

func (ClusterOutput) ToClusterOutputWithContext added in v4.4.0

func (o ClusterOutput) ToClusterOutputWithContext(ctx context.Context) ClusterOutput

func (ClusterOutput) ToClusterPtrOutput added in v4.11.1

func (o ClusterOutput) ToClusterPtrOutput() ClusterPtrOutput

func (ClusterOutput) ToClusterPtrOutputWithContext added in v4.11.1

func (o ClusterOutput) ToClusterPtrOutputWithContext(ctx context.Context) ClusterPtrOutput

type ClusterPtrInput added in v4.11.1

type ClusterPtrInput interface {
	pulumi.Input

	ToClusterPtrOutput() ClusterPtrOutput
	ToClusterPtrOutputWithContext(ctx context.Context) ClusterPtrOutput
}

type ClusterPtrOutput added in v4.11.1

type ClusterPtrOutput struct {
	*pulumi.OutputState
}

func (ClusterPtrOutput) ElementType added in v4.11.1

func (ClusterPtrOutput) ElementType() reflect.Type

func (ClusterPtrOutput) ToClusterPtrOutput added in v4.11.1

func (o ClusterPtrOutput) ToClusterPtrOutput() ClusterPtrOutput

func (ClusterPtrOutput) ToClusterPtrOutputWithContext added in v4.11.1

func (o ClusterPtrOutput) ToClusterPtrOutputWithContext(ctx context.Context) ClusterPtrOutput

type ClusterState

type ClusterState struct {
	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigPtrInput
	// The timeout duration which allows graceful decommissioning when you change the
	// number of worker nodes directly through a `pulumi up`.
	GracefulDecommissionTimeout pulumi.StringPtrInput
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapInput
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringPtrInput
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrInput
}

func (ClusterState) ElementType

func (ClusterState) ElementType() reflect.Type
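
As a sketch of how `ClusterState` is used, the snippet below reads an existing cluster and narrows the lookup with a state hint. `GetCluster` is not reproduced in this excerpt and is assumed to follow the same pattern as `GetClusterIAMPolicy` and `GetJob`; the resource ID format and region are placeholders.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// GetCluster is assumed by analogy with the other Get* functions in this
		// package; the ID format and region value are placeholders.
		cluster, err := dataproc.GetCluster(ctx, "existing",
			pulumi.ID("projects/your-project/regions/us-central1/clusters/your-dataproc-cluster"),
			&dataproc.ClusterState{
				Region: pulumi.String("us-central1"),
			})
		if err != nil {
			return err
		}
		ctx.Export("clusterName", cluster.Name)
		return nil
	})
}

```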

type Job

type Job struct {
	pulumi.CustomResourceState

	// If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
	DriverControlsFilesUri pulumi.StringOutput `pulumi:"driverControlsFilesUri"`
	// A URI pointing to the location of the stdout of the job's driver program.
	DriverOutputResourceUri pulumi.StringOutput `pulumi:"driverOutputResourceUri"`
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete pulumi.BoolPtrOutput `pulumi:"forceDelete"`
	// The config of Hadoop job
	HadoopConfig JobHadoopConfigPtrOutput `pulumi:"hadoopConfig"`
	// The config of hive job
	HiveConfig JobHiveConfigPtrOutput `pulumi:"hiveConfig"`
	// The list of labels (key/value pairs) to add to the job.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The config of Pig job.
	PigConfig JobPigConfigPtrOutput `pulumi:"pigConfig"`
	// The config of job placement.
	Placement JobPlacementOutput `pulumi:"placement"`
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The config of pySpark job.
	PysparkConfig JobPysparkConfigPtrOutput `pulumi:"pysparkConfig"`
	// The reference of the job
	Reference JobReferenceOutput `pulumi:"reference"`
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrOutput `pulumi:"region"`
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingPtrOutput `pulumi:"scheduling"`
	// The config of the Spark job.
	SparkConfig JobSparkConfigPtrOutput `pulumi:"sparkConfig"`
	// The config of SparkSql job
	SparksqlConfig JobSparksqlConfigPtrOutput `pulumi:"sparksqlConfig"`
	// The status of the job.
	Statuses JobStatusArrayOutput `pulumi:"statuses"`
}

Manages a job resource within a Dataproc cluster within GCE. For more information see [the official dataproc documentation](https://cloud.google.com/dataproc/).

!> **Note:** This resource does not support 'update' and changing any attributes will cause the resource to be recreated.

## Example Usage

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		mycluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		spark, err := dataproc.NewJob(ctx, "spark", &dataproc.JobArgs{
			Region:      mycluster.Region,
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: mycluster.Name,
			},
			SparkConfig: &dataproc.JobSparkConfigArgs{
				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
				Args: pulumi.StringArray{
					pulumi.String("1000"),
				},
				Properties: pulumi.StringMap{
					"spark.logConf": pulumi.String("true"),
				},
				LoggingConfig: &dataproc.JobSparkConfigLoggingConfigArgs{
					DriverLogLevels: pulumi.StringMap{
						"root": pulumi.String("INFO"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		pyspark, err := dataproc.NewJob(ctx, "pyspark", &dataproc.JobArgs{
			Region:      mycluster.Region,
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: mycluster.Name,
			},
			PysparkConfig: &dataproc.JobPysparkConfigArgs{
				MainPythonFileUri: pulumi.String("gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py"),
				Properties: pulumi.StringMap{
					"spark.logConf": pulumi.String("true"),
				},
			},
		})
		if err != nil {
			return err
		}
		ctx.Export("sparkStatus", spark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (string, error) {
			return statuses[0].State, nil
		}).(pulumi.StringOutput))
		ctx.Export("pysparkStatus", pyspark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (string, error) {
			return statuses[0].State, nil
		}).(pulumi.StringOutput))
		return nil
	})
}

```

## Import

This resource does not support import.

func GetJob

func GetJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobState, opts ...pulumi.ResourceOption) (*Job, error)

GetJob gets an existing Job resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJob

func NewJob(ctx *pulumi.Context,
	name string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error)

NewJob registers a new resource with the given unique name, arguments, and options.

func (*Job) ElementType added in v4.4.0

func (*Job) ElementType() reflect.Type

func (*Job) ToJobOutput added in v4.4.0

func (i *Job) ToJobOutput() JobOutput

func (*Job) ToJobOutputWithContext added in v4.4.0

func (i *Job) ToJobOutputWithContext(ctx context.Context) JobOutput

func (*Job) ToJobPtrOutput added in v4.11.1

func (i *Job) ToJobPtrOutput() JobPtrOutput

func (*Job) ToJobPtrOutputWithContext added in v4.11.1

func (i *Job) ToJobPtrOutputWithContext(ctx context.Context) JobPtrOutput

type JobArgs

type JobArgs struct {
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete pulumi.BoolPtrInput
	// The config of Hadoop job
	HadoopConfig JobHadoopConfigPtrInput
	// The config of hive job
	HiveConfig JobHiveConfigPtrInput
	// The list of labels (key/value pairs) to add to the job.
	Labels pulumi.StringMapInput
	// The config of Pig job.
	PigConfig JobPigConfigPtrInput
	// The config of job placement.
	Placement JobPlacementInput
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The config of pySpark job.
	PysparkConfig JobPysparkConfigPtrInput
	// The reference of the job
	Reference JobReferencePtrInput
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrInput
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingPtrInput
	// The config of the Spark job.
	SparkConfig JobSparkConfigPtrInput
	// The config of SparkSql job
	SparksqlConfig JobSparksqlConfigPtrInput
}

The set of arguments for constructing a Job resource.

func (JobArgs) ElementType

func (JobArgs) ElementType() reflect.Type

type JobArray added in v4.11.1

type JobArray []JobInput

func (JobArray) ElementType added in v4.11.1

func (JobArray) ElementType() reflect.Type

func (JobArray) ToJobArrayOutput added in v4.11.1

func (i JobArray) ToJobArrayOutput() JobArrayOutput

func (JobArray) ToJobArrayOutputWithContext added in v4.11.1

func (i JobArray) ToJobArrayOutputWithContext(ctx context.Context) JobArrayOutput

type JobArrayInput added in v4.11.1

type JobArrayInput interface {
	pulumi.Input

	ToJobArrayOutput() JobArrayOutput
	ToJobArrayOutputWithContext(context.Context) JobArrayOutput
}

JobArrayInput is an input type that accepts JobArray and JobArrayOutput values. You can construct a concrete instance of `JobArrayInput` via:

JobArray{ JobArgs{...} }

type JobArrayOutput added in v4.11.1

type JobArrayOutput struct{ *pulumi.OutputState }

func (JobArrayOutput) ElementType added in v4.11.1

func (JobArrayOutput) ElementType() reflect.Type

func (JobArrayOutput) Index added in v4.11.1

func (JobArrayOutput) ToJobArrayOutput added in v4.11.1

func (o JobArrayOutput) ToJobArrayOutput() JobArrayOutput

func (JobArrayOutput) ToJobArrayOutputWithContext added in v4.11.1

func (o JobArrayOutput) ToJobArrayOutputWithContext(ctx context.Context) JobArrayOutput

type JobHadoopConfig

type JobHadoopConfig struct {
	// HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the CLASSPATHs of the Hadoop driver and tasks.
	JarFileUris   []string                      `pulumi:"jarFileUris"`
	LoggingConfig *JobHadoopConfigLoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
}

type JobHadoopConfigArgs

type JobHadoopConfigArgs struct {
	// HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the CLASSPATHs of the Hadoop driver and tasks.
	JarFileUris   pulumi.StringArrayInput              `pulumi:"jarFileUris"`
	LoggingConfig JobHadoopConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

func (JobHadoopConfigArgs) ElementType

func (JobHadoopConfigArgs) ElementType() reflect.Type

func (JobHadoopConfigArgs) ToJobHadoopConfigOutput

func (i JobHadoopConfigArgs) ToJobHadoopConfigOutput() JobHadoopConfigOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigOutputWithContext

func (i JobHadoopConfigArgs) ToJobHadoopConfigOutputWithContext(ctx context.Context) JobHadoopConfigOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigPtrOutput

func (i JobHadoopConfigArgs) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigPtrOutputWithContext

func (i JobHadoopConfigArgs) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHadoopConfigInput

type JobHadoopConfigInput interface {
	pulumi.Input

	ToJobHadoopConfigOutput() JobHadoopConfigOutput
	ToJobHadoopConfigOutputWithContext(context.Context) JobHadoopConfigOutput
}

JobHadoopConfigInput is an input type that accepts JobHadoopConfigArgs and JobHadoopConfigOutput values. You can construct a concrete instance of `JobHadoopConfigInput` via:

JobHadoopConfigArgs{...}
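
For example, a Hadoop job can be submitted by setting `HadoopConfig` on `JobArgs`. This is a minimal sketch rather than a complete configuration: the output bucket is a placeholder, and the examples jar path is the one cited in the `MainJarFileUri` documentation above.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		mycluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		// Run the word-count example shipped with the Dataproc image; the
		// gs:// output path is a placeholder.
		_, err = dataproc.NewJob(ctx, "hadoop", &dataproc.JobArgs{
			Region:      mycluster.Region,
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: mycluster.Name,
			},
			HadoopConfig: &dataproc.JobHadoopConfigArgs{
				MainJarFileUri: pulumi.String("file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"),
				Args: pulumi.StringArray{
					pulumi.String("wordcount"),
					pulumi.String("file:///usr/lib/spark/NOTICE"),
					pulumi.String("gs://your-bucket/hadoopjob-output/"),
				},
				LoggingConfig: &dataproc.JobHadoopConfigLoggingConfigArgs{
					DriverLogLevels: pulumi.StringMap{
						"root": pulumi.String("INFO"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```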

type JobHadoopConfigLoggingConfig

type JobHadoopConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobHadoopConfigLoggingConfigArgs

type JobHadoopConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobHadoopConfigLoggingConfigArgs) ElementType

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutput

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutputWithContext

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutput

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigInput

type JobHadoopConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput
	ToJobHadoopConfigLoggingConfigOutputWithContext(context.Context) JobHadoopConfigLoggingConfigOutput
}

JobHadoopConfigLoggingConfigInput is an input type that accepts JobHadoopConfigLoggingConfigArgs and JobHadoopConfigLoggingConfigOutput values. You can construct a concrete instance of `JobHadoopConfigLoggingConfigInput` via:

JobHadoopConfigLoggingConfigArgs{...}

type JobHadoopConfigLoggingConfigOutput

type JobHadoopConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigLoggingConfigOutput) DriverLogLevels

func (JobHadoopConfigLoggingConfigOutput) ElementType

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutput

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutputWithContext

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutput

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigPtrInput

type JobHadoopConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput
	ToJobHadoopConfigLoggingConfigPtrOutputWithContext(context.Context) JobHadoopConfigLoggingConfigPtrOutput
}

JobHadoopConfigLoggingConfigPtrInput is an input type that accepts JobHadoopConfigLoggingConfigArgs, JobHadoopConfigLoggingConfigPtr and JobHadoopConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobHadoopConfigLoggingConfigPtrInput` via:

        JobHadoopConfigLoggingConfigArgs{...}

or:

        nil

type JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobHadoopConfigLoggingConfigPtrOutput) Elem

func (JobHadoopConfigLoggingConfigPtrOutput) ElementType

func (JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutput

func (o JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (o JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigOutput

type JobHadoopConfigOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobHadoopConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobHadoopConfigOutput) ElementType

func (JobHadoopConfigOutput) ElementType() reflect.Type

func (JobHadoopConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobHadoopConfigOutput) JarFileUris

HCFS URIs of jar files to add to the CLASSPATHs of the Hadoop driver and tasks.

func (JobHadoopConfigOutput) LoggingConfig

func (JobHadoopConfigOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`

func (JobHadoopConfigOutput) MainJarFileUri

func (o JobHadoopConfigOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
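
As an illustration only (the cluster name, region, bucket, and jar arguments below are placeholders, not values documented by this package), a Hadoop job might reference a main jar like so:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Cluster name, region, and bucket are placeholders for existing resources.
		_, err := dataproc.NewJob(ctx, "hadoop", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			HadoopConfig: &dataproc.JobHadoopConfigArgs{
				MainJarFileUri: pulumi.String("file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"),
				Args: pulumi.StringArray{
					pulumi.String("wordcount"),
					pulumi.String("file:///usr/lib/spark/NOTICE"),
					pulumi.String("gs://my-bucket/hadoop-output"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```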

func (JobHadoopConfigOutput) Properties

A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHadoopConfigOutput) ToJobHadoopConfigOutput

func (o JobHadoopConfigOutput) ToJobHadoopConfigOutput() JobHadoopConfigOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigOutputWithContext

func (o JobHadoopConfigOutput) ToJobHadoopConfigOutputWithContext(ctx context.Context) JobHadoopConfigOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigPtrOutput

func (o JobHadoopConfigOutput) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigPtrOutputWithContext

func (o JobHadoopConfigOutput) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHadoopConfigPtrInput

type JobHadoopConfigPtrInput interface {
	pulumi.Input

	ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput
	ToJobHadoopConfigPtrOutputWithContext(context.Context) JobHadoopConfigPtrOutput
}

JobHadoopConfigPtrInput is an input type that accepts JobHadoopConfigArgs, JobHadoopConfigPtr and JobHadoopConfigPtrOutput values. You can construct a concrete instance of `JobHadoopConfigPtrInput` via:

        JobHadoopConfigArgs{...}

or:

        nil

type JobHadoopConfigPtrOutput

type JobHadoopConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobHadoopConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobHadoopConfigPtrOutput) Elem

func (JobHadoopConfigPtrOutput) ElementType

func (JobHadoopConfigPtrOutput) ElementType() reflect.Type

func (JobHadoopConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobHadoopConfigPtrOutput) JarFileUris

HCFS URIs of jar files to add to the CLASSPATHs of the Hadoop driver and tasks.

func (JobHadoopConfigPtrOutput) LoggingConfig

func (JobHadoopConfigPtrOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`

func (JobHadoopConfigPtrOutput) MainJarFileUri

func (o JobHadoopConfigPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobHadoopConfigPtrOutput) Properties

A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutput

func (o JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutputWithContext

func (o JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHiveConfig

type JobHiveConfig struct {
	// Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks.
	JarFileUris []string `pulumi:"jarFileUris"`
	// A mapping of property names to values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`.
	QueryLists []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

type JobHiveConfigArgs

type JobHiveConfigArgs struct {
	// Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// A mapping of property names to values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`.
	QueryLists pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}
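
A hedged sketch of wiring this config into a job (the cluster name, region, queries, and variables are placeholders; set only one of `queryLists` or `queryFileUri`):

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Cluster name, region, queries, and script variables are placeholders.
		_, err := dataproc.NewJob(ctx, "hive", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			HiveConfig: &dataproc.JobHiveConfigArgs{
				ContinueOnFailure: pulumi.Bool(true),
				QueryLists: pulumi.StringArray{
					pulumi.String("SHOW DATABASES"),
					pulumi.String("SHOW TABLES"),
				},
				ScriptVariables: pulumi.StringMap{
					"mytable": pulumi.String("my_test_table"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```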

func (JobHiveConfigArgs) ElementType

func (JobHiveConfigArgs) ElementType() reflect.Type

func (JobHiveConfigArgs) ToJobHiveConfigOutput

func (i JobHiveConfigArgs) ToJobHiveConfigOutput() JobHiveConfigOutput

func (JobHiveConfigArgs) ToJobHiveConfigOutputWithContext

func (i JobHiveConfigArgs) ToJobHiveConfigOutputWithContext(ctx context.Context) JobHiveConfigOutput

func (JobHiveConfigArgs) ToJobHiveConfigPtrOutput

func (i JobHiveConfigArgs) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigArgs) ToJobHiveConfigPtrOutputWithContext

func (i JobHiveConfigArgs) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobHiveConfigInput

type JobHiveConfigInput interface {
	pulumi.Input

	ToJobHiveConfigOutput() JobHiveConfigOutput
	ToJobHiveConfigOutputWithContext(context.Context) JobHiveConfigOutput
}

JobHiveConfigInput is an input type that accepts JobHiveConfigArgs and JobHiveConfigOutput values. You can construct a concrete instance of `JobHiveConfigInput` via:

JobHiveConfigArgs{...}

type JobHiveConfigOutput

type JobHiveConfigOutput struct{ *pulumi.OutputState }

func (JobHiveConfigOutput) ContinueOnFailure

func (o JobHiveConfigOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (JobHiveConfigOutput) ElementType

func (JobHiveConfigOutput) ElementType() reflect.Type

func (JobHiveConfigOutput) JarFileUris

HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks.

func (JobHiveConfigOutput) Properties

A mapping of property names to values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHiveConfigOutput) QueryFileUri

func (o JobHiveConfigOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`.

func (JobHiveConfigOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobHiveConfigOutput) ScriptVariables

func (o JobHiveConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).

func (JobHiveConfigOutput) ToJobHiveConfigOutput

func (o JobHiveConfigOutput) ToJobHiveConfigOutput() JobHiveConfigOutput

func (JobHiveConfigOutput) ToJobHiveConfigOutputWithContext

func (o JobHiveConfigOutput) ToJobHiveConfigOutputWithContext(ctx context.Context) JobHiveConfigOutput

func (JobHiveConfigOutput) ToJobHiveConfigPtrOutput

func (o JobHiveConfigOutput) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigOutput) ToJobHiveConfigPtrOutputWithContext

func (o JobHiveConfigOutput) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobHiveConfigPtrInput

type JobHiveConfigPtrInput interface {
	pulumi.Input

	ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput
	ToJobHiveConfigPtrOutputWithContext(context.Context) JobHiveConfigPtrOutput
}

JobHiveConfigPtrInput is an input type that accepts JobHiveConfigArgs, JobHiveConfigPtr and JobHiveConfigPtrOutput values. You can construct a concrete instance of `JobHiveConfigPtrInput` via:

        JobHiveConfigArgs{...}

or:

        nil

type JobHiveConfigPtrOutput

type JobHiveConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHiveConfigPtrOutput) ContinueOnFailure

func (o JobHiveConfigPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (JobHiveConfigPtrOutput) Elem

func (JobHiveConfigPtrOutput) ElementType

func (JobHiveConfigPtrOutput) ElementType() reflect.Type

func (JobHiveConfigPtrOutput) JarFileUris

HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks.

func (JobHiveConfigPtrOutput) Properties

A mapping of property names to values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHiveConfigPtrOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`.

func (JobHiveConfigPtrOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobHiveConfigPtrOutput) ScriptVariables

func (o JobHiveConfigPtrOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).

func (JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutput

func (o JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutputWithContext

func (o JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobIAMBinding

type JobIAMBinding struct {
	pulumi.CustomResourceState

	Condition JobIAMBindingConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the job's IAM policy.
	Etag    pulumi.StringOutput      `pulumi:"etag"`
	JobId   pulumi.StringOutput      `pulumi:"jobId"`
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_job\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewJobIAMPolicy(ctx, "editor", &dataproc.JobIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			JobId:      pulumi.String("your-dataproc-job"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMBinding(ctx, "editor", &dataproc.JobIAMBindingArgs{
			JobId: pulumi.String("your-dataproc-job"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMMember(ctx, "editor", &dataproc.JobIAMMemberArgs{
			JobId:  pulumi.String("your-dataproc-job"),
			Member: pulumi.String("user:jane@example.com"),
			Role:   pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Job IAM resources can be imported using the project, region, job id, role and/or member.

```sh

$ pulumi import gcp:dataproc/jobIAMBinding:JobIAMBinding editor "projects/{project}/regions/{region}/jobs/{job_id}"

```

```sh

$ pulumi import gcp:dataproc/jobIAMBinding:JobIAMBinding editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/jobIAMBinding:JobIAMBinding editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor user:jane@example.com"

```

-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetJobIAMBinding

func GetJobIAMBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMBindingState, opts ...pulumi.ResourceOption) (*JobIAMBinding, error)

GetJobIAMBinding gets an existing JobIAMBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMBinding

func NewJobIAMBinding(ctx *pulumi.Context,
	name string, args *JobIAMBindingArgs, opts ...pulumi.ResourceOption) (*JobIAMBinding, error)

NewJobIAMBinding registers a new resource with the given unique name, arguments, and options.

func (*JobIAMBinding) ElementType added in v4.4.0

func (*JobIAMBinding) ElementType() reflect.Type

func (*JobIAMBinding) ToJobIAMBindingOutput added in v4.4.0

func (i *JobIAMBinding) ToJobIAMBindingOutput() JobIAMBindingOutput

func (*JobIAMBinding) ToJobIAMBindingOutputWithContext added in v4.4.0

func (i *JobIAMBinding) ToJobIAMBindingOutputWithContext(ctx context.Context) JobIAMBindingOutput

func (*JobIAMBinding) ToJobIAMBindingPtrOutput added in v4.11.1

func (i *JobIAMBinding) ToJobIAMBindingPtrOutput() JobIAMBindingPtrOutput

func (*JobIAMBinding) ToJobIAMBindingPtrOutputWithContext added in v4.11.1

func (i *JobIAMBinding) ToJobIAMBindingPtrOutputWithContext(ctx context.Context) JobIAMBindingPtrOutput

type JobIAMBindingArgs

type JobIAMBindingArgs struct {
	Condition JobIAMBindingConditionPtrInput
	JobId     pulumi.StringInput
	Members   pulumi.StringArrayInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a JobIAMBinding resource.

func (JobIAMBindingArgs) ElementType

func (JobIAMBindingArgs) ElementType() reflect.Type

type JobIAMBindingArray added in v4.11.1

type JobIAMBindingArray []JobIAMBindingInput

func (JobIAMBindingArray) ElementType added in v4.11.1

func (JobIAMBindingArray) ElementType() reflect.Type

func (JobIAMBindingArray) ToJobIAMBindingArrayOutput added in v4.11.1

func (i JobIAMBindingArray) ToJobIAMBindingArrayOutput() JobIAMBindingArrayOutput

func (JobIAMBindingArray) ToJobIAMBindingArrayOutputWithContext added in v4.11.1

func (i JobIAMBindingArray) ToJobIAMBindingArrayOutputWithContext(ctx context.Context) JobIAMBindingArrayOutput

type JobIAMBindingArrayInput added in v4.11.1

type JobIAMBindingArrayInput interface {
	pulumi.Input

	ToJobIAMBindingArrayOutput() JobIAMBindingArrayOutput
	ToJobIAMBindingArrayOutputWithContext(context.Context) JobIAMBindingArrayOutput
}

JobIAMBindingArrayInput is an input type that accepts JobIAMBindingArray and JobIAMBindingArrayOutput values. You can construct a concrete instance of `JobIAMBindingArrayInput` via:

JobIAMBindingArray{ JobIAMBindingArgs{...} }

type JobIAMBindingArrayOutput added in v4.11.1

type JobIAMBindingArrayOutput struct{ *pulumi.OutputState }

func (JobIAMBindingArrayOutput) ElementType added in v4.11.1

func (JobIAMBindingArrayOutput) ElementType() reflect.Type

func (JobIAMBindingArrayOutput) Index added in v4.11.1

func (JobIAMBindingArrayOutput) ToJobIAMBindingArrayOutput added in v4.11.1

func (o JobIAMBindingArrayOutput) ToJobIAMBindingArrayOutput() JobIAMBindingArrayOutput

func (JobIAMBindingArrayOutput) ToJobIAMBindingArrayOutputWithContext added in v4.11.1

func (o JobIAMBindingArrayOutput) ToJobIAMBindingArrayOutputWithContext(ctx context.Context) JobIAMBindingArrayOutput

type JobIAMBindingCondition

type JobIAMBindingCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type JobIAMBindingConditionArgs

type JobIAMBindingConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}
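
A sketch of attaching a condition to a binding (the job id, title, description, and CEL expression below are placeholders, not values documented by this package):

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Job id, title, description, and CEL expression are placeholders.
		_, err := dataproc.NewJobIAMBinding(ctx, "editorExpiring", &dataproc.JobIAMBindingArgs{
			JobId: pulumi.String("your-dataproc-job"),
			Role:  pulumi.String("roles/editor"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Condition: &dataproc.JobIAMBindingConditionArgs{
				Title:       pulumi.String("expires-after-2021"),
				Description: pulumi.String("Expiring access"),
				Expression:  pulumi.String("request.time < timestamp(\"2022-01-01T00:00:00Z\")"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```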

func (JobIAMBindingConditionArgs) ElementType

func (JobIAMBindingConditionArgs) ElementType() reflect.Type

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutput

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutputWithContext

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutputWithContext(ctx context.Context) JobIAMBindingConditionOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutput

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutputWithContext

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionInput

type JobIAMBindingConditionInput interface {
	pulumi.Input

	ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput
	ToJobIAMBindingConditionOutputWithContext(context.Context) JobIAMBindingConditionOutput
}

JobIAMBindingConditionInput is an input type that accepts JobIAMBindingConditionArgs and JobIAMBindingConditionOutput values. You can construct a concrete instance of `JobIAMBindingConditionInput` via:

JobIAMBindingConditionArgs{...}

type JobIAMBindingConditionOutput

type JobIAMBindingConditionOutput struct{ *pulumi.OutputState }

func (JobIAMBindingConditionOutput) Description

func (JobIAMBindingConditionOutput) ElementType

func (JobIAMBindingConditionOutput) Expression

func (JobIAMBindingConditionOutput) Title

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutput

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutputWithContext

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutputWithContext(ctx context.Context) JobIAMBindingConditionOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutput

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutputWithContext

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionPtrInput

type JobIAMBindingConditionPtrInput interface {
	pulumi.Input

	ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput
	ToJobIAMBindingConditionPtrOutputWithContext(context.Context) JobIAMBindingConditionPtrOutput
}

JobIAMBindingConditionPtrInput is an input type that accepts JobIAMBindingConditionArgs, JobIAMBindingConditionPtr and JobIAMBindingConditionPtrOutput values. You can construct a concrete instance of `JobIAMBindingConditionPtrInput` via:

        JobIAMBindingConditionArgs{...}

or:

        nil

type JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionPtrOutput struct{ *pulumi.OutputState }

func (JobIAMBindingConditionPtrOutput) Description

func (JobIAMBindingConditionPtrOutput) Elem

func (JobIAMBindingConditionPtrOutput) ElementType

func (JobIAMBindingConditionPtrOutput) Expression

func (JobIAMBindingConditionPtrOutput) Title

func (JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutput

func (o JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutputWithContext

func (o JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingInput added in v4.4.0

type JobIAMBindingInput interface {
	pulumi.Input

	ToJobIAMBindingOutput() JobIAMBindingOutput
	ToJobIAMBindingOutputWithContext(ctx context.Context) JobIAMBindingOutput
}

type JobIAMBindingMap added in v4.11.1

type JobIAMBindingMap map[string]JobIAMBindingInput

func (JobIAMBindingMap) ElementType added in v4.11.1

func (JobIAMBindingMap) ElementType() reflect.Type

func (JobIAMBindingMap) ToJobIAMBindingMapOutput added in v4.11.1

func (i JobIAMBindingMap) ToJobIAMBindingMapOutput() JobIAMBindingMapOutput

func (JobIAMBindingMap) ToJobIAMBindingMapOutputWithContext added in v4.11.1

func (i JobIAMBindingMap) ToJobIAMBindingMapOutputWithContext(ctx context.Context) JobIAMBindingMapOutput

type JobIAMBindingMapInput added in v4.11.1

type JobIAMBindingMapInput interface {
	pulumi.Input

	ToJobIAMBindingMapOutput() JobIAMBindingMapOutput
	ToJobIAMBindingMapOutputWithContext(context.Context) JobIAMBindingMapOutput
}

JobIAMBindingMapInput is an input type that accepts JobIAMBindingMap and JobIAMBindingMapOutput values. You can construct a concrete instance of `JobIAMBindingMapInput` via:

JobIAMBindingMap{ "key": JobIAMBindingArgs{...} }

type JobIAMBindingMapOutput added in v4.11.1

type JobIAMBindingMapOutput struct{ *pulumi.OutputState }

func (JobIAMBindingMapOutput) ElementType added in v4.11.1

func (JobIAMBindingMapOutput) ElementType() reflect.Type

func (JobIAMBindingMapOutput) MapIndex added in v4.11.1

func (JobIAMBindingMapOutput) ToJobIAMBindingMapOutput added in v4.11.1

func (o JobIAMBindingMapOutput) ToJobIAMBindingMapOutput() JobIAMBindingMapOutput

func (JobIAMBindingMapOutput) ToJobIAMBindingMapOutputWithContext added in v4.11.1

func (o JobIAMBindingMapOutput) ToJobIAMBindingMapOutputWithContext(ctx context.Context) JobIAMBindingMapOutput

type JobIAMBindingOutput added in v4.4.0

type JobIAMBindingOutput struct {
	*pulumi.OutputState
}

func (JobIAMBindingOutput) ElementType added in v4.4.0

func (JobIAMBindingOutput) ElementType() reflect.Type

func (JobIAMBindingOutput) ToJobIAMBindingOutput added in v4.4.0

func (o JobIAMBindingOutput) ToJobIAMBindingOutput() JobIAMBindingOutput

func (JobIAMBindingOutput) ToJobIAMBindingOutputWithContext added in v4.4.0

func (o JobIAMBindingOutput) ToJobIAMBindingOutputWithContext(ctx context.Context) JobIAMBindingOutput

func (JobIAMBindingOutput) ToJobIAMBindingPtrOutput added in v4.11.1

func (o JobIAMBindingOutput) ToJobIAMBindingPtrOutput() JobIAMBindingPtrOutput

func (JobIAMBindingOutput) ToJobIAMBindingPtrOutputWithContext added in v4.11.1

func (o JobIAMBindingOutput) ToJobIAMBindingPtrOutputWithContext(ctx context.Context) JobIAMBindingPtrOutput

type JobIAMBindingPtrInput added in v4.11.1

type JobIAMBindingPtrInput interface {
	pulumi.Input

	ToJobIAMBindingPtrOutput() JobIAMBindingPtrOutput
	ToJobIAMBindingPtrOutputWithContext(ctx context.Context) JobIAMBindingPtrOutput
}

type JobIAMBindingPtrOutput added in v4.11.1

type JobIAMBindingPtrOutput struct {
	*pulumi.OutputState
}

func (JobIAMBindingPtrOutput) ElementType added in v4.11.1

func (JobIAMBindingPtrOutput) ElementType() reflect.Type

func (JobIAMBindingPtrOutput) ToJobIAMBindingPtrOutput added in v4.11.1

func (o JobIAMBindingPtrOutput) ToJobIAMBindingPtrOutput() JobIAMBindingPtrOutput

func (JobIAMBindingPtrOutput) ToJobIAMBindingPtrOutputWithContext added in v4.11.1

func (o JobIAMBindingPtrOutput) ToJobIAMBindingPtrOutputWithContext(ctx context.Context) JobIAMBindingPtrOutput

type JobIAMBindingState

type JobIAMBindingState struct {
	Condition JobIAMBindingConditionPtrInput
	// (Computed) The etag of the job's IAM policy.
	Etag    pulumi.StringPtrInput
	JobId   pulumi.StringPtrInput
	Members pulumi.StringArrayInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (JobIAMBindingState) ElementType

func (JobIAMBindingState) ElementType() reflect.Type

type JobIAMMember

type JobIAMMember struct {
	pulumi.CustomResourceState

	Condition JobIAMMemberConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the job's IAM policy.
	Etag   pulumi.StringOutput `pulumi:"etag"`
	JobId  pulumi.StringOutput `pulumi:"jobId"`
	Member pulumi.StringOutput `pulumi:"member"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_job\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewJobIAMPolicy(ctx, "editor", &dataproc.JobIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			JobId:      pulumi.String("your-dataproc-job"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMBinding(ctx, "editor", &dataproc.JobIAMBindingArgs{
			JobId: pulumi.String("your-dataproc-job"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMMember(ctx, "editor", &dataproc.JobIAMMemberArgs{
			JobId:  pulumi.String("your-dataproc-job"),
			Member: pulumi.String("user:jane@example.com"),
			Role:   pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Job IAM resources can be imported using the project, region, job id, role and/or member.

```sh

$ pulumi import gcp:dataproc/jobIAMMember:JobIAMMember editor "projects/{project}/regions/{region}/jobs/{job_id}"

```

```sh

$ pulumi import gcp:dataproc/jobIAMMember:JobIAMMember editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/jobIAMMember:JobIAMMember editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor user:jane@example.com"

```

-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetJobIAMMember

func GetJobIAMMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMMemberState, opts ...pulumi.ResourceOption) (*JobIAMMember, error)

GetJobIAMMember gets an existing JobIAMMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMMember

func NewJobIAMMember(ctx *pulumi.Context,
	name string, args *JobIAMMemberArgs, opts ...pulumi.ResourceOption) (*JobIAMMember, error)

NewJobIAMMember registers a new resource with the given unique name, arguments, and options.

func (*JobIAMMember) ElementType added in v4.4.0

func (*JobIAMMember) ElementType() reflect.Type

func (*JobIAMMember) ToJobIAMMemberOutput added in v4.4.0

func (i *JobIAMMember) ToJobIAMMemberOutput() JobIAMMemberOutput

func (*JobIAMMember) ToJobIAMMemberOutputWithContext added in v4.4.0

func (i *JobIAMMember) ToJobIAMMemberOutputWithContext(ctx context.Context) JobIAMMemberOutput

func (*JobIAMMember) ToJobIAMMemberPtrOutput added in v4.11.1

func (i *JobIAMMember) ToJobIAMMemberPtrOutput() JobIAMMemberPtrOutput

func (*JobIAMMember) ToJobIAMMemberPtrOutputWithContext added in v4.11.1

func (i *JobIAMMember) ToJobIAMMemberPtrOutputWithContext(ctx context.Context) JobIAMMemberPtrOutput

type JobIAMMemberArgs

type JobIAMMemberArgs struct {
	Condition JobIAMMemberConditionPtrInput
	JobId     pulumi.StringInput
	Member    pulumi.StringInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a JobIAMMember resource.

func (JobIAMMemberArgs) ElementType

func (JobIAMMemberArgs) ElementType() reflect.Type

type JobIAMMemberArray added in v4.11.1

type JobIAMMemberArray []JobIAMMemberInput

func (JobIAMMemberArray) ElementType added in v4.11.1

func (JobIAMMemberArray) ElementType() reflect.Type

func (JobIAMMemberArray) ToJobIAMMemberArrayOutput added in v4.11.1

func (i JobIAMMemberArray) ToJobIAMMemberArrayOutput() JobIAMMemberArrayOutput

func (JobIAMMemberArray) ToJobIAMMemberArrayOutputWithContext added in v4.11.1

func (i JobIAMMemberArray) ToJobIAMMemberArrayOutputWithContext(ctx context.Context) JobIAMMemberArrayOutput

type JobIAMMemberArrayInput added in v4.11.1

type JobIAMMemberArrayInput interface {
	pulumi.Input

	ToJobIAMMemberArrayOutput() JobIAMMemberArrayOutput
	ToJobIAMMemberArrayOutputWithContext(context.Context) JobIAMMemberArrayOutput
}

JobIAMMemberArrayInput is an input type that accepts JobIAMMemberArray and JobIAMMemberArrayOutput values. You can construct a concrete instance of `JobIAMMemberArrayInput` via:

JobIAMMemberArray{ JobIAMMemberArgs{...} }

type JobIAMMemberArrayOutput added in v4.11.1

type JobIAMMemberArrayOutput struct{ *pulumi.OutputState }

func (JobIAMMemberArrayOutput) ElementType added in v4.11.1

func (JobIAMMemberArrayOutput) ElementType() reflect.Type

func (JobIAMMemberArrayOutput) Index added in v4.11.1

func (JobIAMMemberArrayOutput) ToJobIAMMemberArrayOutput added in v4.11.1

func (o JobIAMMemberArrayOutput) ToJobIAMMemberArrayOutput() JobIAMMemberArrayOutput

func (JobIAMMemberArrayOutput) ToJobIAMMemberArrayOutputWithContext added in v4.11.1

func (o JobIAMMemberArrayOutput) ToJobIAMMemberArrayOutputWithContext(ctx context.Context) JobIAMMemberArrayOutput

type JobIAMMemberCondition

type JobIAMMemberCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type JobIAMMemberConditionArgs

type JobIAMMemberConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (JobIAMMemberConditionArgs) ElementType

func (JobIAMMemberConditionArgs) ElementType() reflect.Type

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutput

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutputWithContext

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutputWithContext(ctx context.Context) JobIAMMemberConditionOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutput

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutputWithContext

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionInput

type JobIAMMemberConditionInput interface {
	pulumi.Input

	ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput
	ToJobIAMMemberConditionOutputWithContext(context.Context) JobIAMMemberConditionOutput
}

JobIAMMemberConditionInput is an input type that accepts JobIAMMemberConditionArgs and JobIAMMemberConditionOutput values. You can construct a concrete instance of `JobIAMMemberConditionInput` via:

JobIAMMemberConditionArgs{...}

type JobIAMMemberConditionOutput

type JobIAMMemberConditionOutput struct{ *pulumi.OutputState }

func (JobIAMMemberConditionOutput) Description

func (JobIAMMemberConditionOutput) ElementType

func (JobIAMMemberConditionOutput) Expression

func (JobIAMMemberConditionOutput) Title

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutput

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutputWithContext

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutputWithContext(ctx context.Context) JobIAMMemberConditionOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutput

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutputWithContext

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionPtrInput

type JobIAMMemberConditionPtrInput interface {
	pulumi.Input

	ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput
	ToJobIAMMemberConditionPtrOutputWithContext(context.Context) JobIAMMemberConditionPtrOutput
}

JobIAMMemberConditionPtrInput is an input type that accepts JobIAMMemberConditionArgs, JobIAMMemberConditionPtr and JobIAMMemberConditionPtrOutput values. You can construct a concrete instance of `JobIAMMemberConditionPtrInput` via:

        JobIAMMemberConditionArgs{...}

or:

        nil

type JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionPtrOutput struct{ *pulumi.OutputState }

func (JobIAMMemberConditionPtrOutput) Description

func (JobIAMMemberConditionPtrOutput) Elem

func (JobIAMMemberConditionPtrOutput) ElementType

func (JobIAMMemberConditionPtrOutput) Expression

func (JobIAMMemberConditionPtrOutput) Title

func (JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutput

func (o JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutputWithContext

func (o JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberInput added in v4.4.0

type JobIAMMemberInput interface {
	pulumi.Input

	ToJobIAMMemberOutput() JobIAMMemberOutput
	ToJobIAMMemberOutputWithContext(ctx context.Context) JobIAMMemberOutput
}

type JobIAMMemberMap added in v4.11.1

type JobIAMMemberMap map[string]JobIAMMemberInput

func (JobIAMMemberMap) ElementType added in v4.11.1

func (JobIAMMemberMap) ElementType() reflect.Type

func (JobIAMMemberMap) ToJobIAMMemberMapOutput added in v4.11.1

func (i JobIAMMemberMap) ToJobIAMMemberMapOutput() JobIAMMemberMapOutput

func (JobIAMMemberMap) ToJobIAMMemberMapOutputWithContext added in v4.11.1

func (i JobIAMMemberMap) ToJobIAMMemberMapOutputWithContext(ctx context.Context) JobIAMMemberMapOutput

type JobIAMMemberMapInput added in v4.11.1

type JobIAMMemberMapInput interface {
	pulumi.Input

	ToJobIAMMemberMapOutput() JobIAMMemberMapOutput
	ToJobIAMMemberMapOutputWithContext(context.Context) JobIAMMemberMapOutput
}

JobIAMMemberMapInput is an input type that accepts JobIAMMemberMap and JobIAMMemberMapOutput values. You can construct a concrete instance of `JobIAMMemberMapInput` via:

JobIAMMemberMap{ "key": JobIAMMemberArgs{...} }

type JobIAMMemberMapOutput added in v4.11.1

type JobIAMMemberMapOutput struct{ *pulumi.OutputState }

func (JobIAMMemberMapOutput) ElementType added in v4.11.1

func (JobIAMMemberMapOutput) ElementType() reflect.Type

func (JobIAMMemberMapOutput) MapIndex added in v4.11.1

func (JobIAMMemberMapOutput) ToJobIAMMemberMapOutput added in v4.11.1

func (o JobIAMMemberMapOutput) ToJobIAMMemberMapOutput() JobIAMMemberMapOutput

func (JobIAMMemberMapOutput) ToJobIAMMemberMapOutputWithContext added in v4.11.1

func (o JobIAMMemberMapOutput) ToJobIAMMemberMapOutputWithContext(ctx context.Context) JobIAMMemberMapOutput

type JobIAMMemberOutput added in v4.4.0

type JobIAMMemberOutput struct {
	*pulumi.OutputState
}

func (JobIAMMemberOutput) ElementType added in v4.4.0

func (JobIAMMemberOutput) ElementType() reflect.Type

func (JobIAMMemberOutput) ToJobIAMMemberOutput added in v4.4.0

func (o JobIAMMemberOutput) ToJobIAMMemberOutput() JobIAMMemberOutput

func (JobIAMMemberOutput) ToJobIAMMemberOutputWithContext added in v4.4.0

func (o JobIAMMemberOutput) ToJobIAMMemberOutputWithContext(ctx context.Context) JobIAMMemberOutput

func (JobIAMMemberOutput) ToJobIAMMemberPtrOutput added in v4.11.1

func (o JobIAMMemberOutput) ToJobIAMMemberPtrOutput() JobIAMMemberPtrOutput

func (JobIAMMemberOutput) ToJobIAMMemberPtrOutputWithContext added in v4.11.1

func (o JobIAMMemberOutput) ToJobIAMMemberPtrOutputWithContext(ctx context.Context) JobIAMMemberPtrOutput

type JobIAMMemberPtrInput added in v4.11.1

type JobIAMMemberPtrInput interface {
	pulumi.Input

	ToJobIAMMemberPtrOutput() JobIAMMemberPtrOutput
	ToJobIAMMemberPtrOutputWithContext(ctx context.Context) JobIAMMemberPtrOutput
}

type JobIAMMemberPtrOutput added in v4.11.1

type JobIAMMemberPtrOutput struct {
	*pulumi.OutputState
}

func (JobIAMMemberPtrOutput) ElementType added in v4.11.1

func (JobIAMMemberPtrOutput) ElementType() reflect.Type

func (JobIAMMemberPtrOutput) ToJobIAMMemberPtrOutput added in v4.11.1

func (o JobIAMMemberPtrOutput) ToJobIAMMemberPtrOutput() JobIAMMemberPtrOutput

func (JobIAMMemberPtrOutput) ToJobIAMMemberPtrOutputWithContext added in v4.11.1

func (o JobIAMMemberPtrOutput) ToJobIAMMemberPtrOutputWithContext(ctx context.Context) JobIAMMemberPtrOutput

type JobIAMMemberState

type JobIAMMemberState struct {
	Condition JobIAMMemberConditionPtrInput
	// (Computed) The etag of the job's IAM policy.
	Etag   pulumi.StringPtrInput
	JobId  pulumi.StringPtrInput
	Member pulumi.StringPtrInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (JobIAMMemberState) ElementType

func (JobIAMMemberState) ElementType() reflect.Type

type JobIAMPolicy

type JobIAMPolicy struct {
	pulumi.CustomResourceState

	// (Computed) The etag of the job's IAM policy.
	Etag  pulumi.StringOutput `pulumi:"etag"`
	JobId pulumi.StringOutput `pulumi:"jobId"`
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringOutput `pulumi:"policyData"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_job\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewJobIAMPolicy(ctx, "editor", &dataproc.JobIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			JobId:      pulumi.String("your-dataproc-job"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMBinding(ctx, "editor", &dataproc.JobIAMBindingArgs{
			JobId: pulumi.String("your-dataproc-job"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMMember(ctx, "editor", &dataproc.JobIAMMemberArgs{
			JobId:  pulumi.String("your-dataproc-job"),
			Member: pulumi.String("user:jane@example.com"),
			Role:   pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Job IAM resources can be imported using the project, region, job id, role and/or member.

```sh

$ pulumi import gcp:dataproc/jobIAMPolicy:JobIAMPolicy editor "projects/{project}/regions/{region}/jobs/{job_id}"

```

```sh

$ pulumi import gcp:dataproc/jobIAMPolicy:JobIAMPolicy editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/jobIAMPolicy:JobIAMPolicy editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor user:jane@example.com"

```

-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetJobIAMPolicy

func GetJobIAMPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMPolicyState, opts ...pulumi.ResourceOption) (*JobIAMPolicy, error)

GetJobIAMPolicy gets an existing JobIAMPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMPolicy

func NewJobIAMPolicy(ctx *pulumi.Context,
	name string, args *JobIAMPolicyArgs, opts ...pulumi.ResourceOption) (*JobIAMPolicy, error)

NewJobIAMPolicy registers a new resource with the given unique name, arguments, and options.

func (*JobIAMPolicy) ElementType added in v4.4.0

func (*JobIAMPolicy) ElementType() reflect.Type

func (*JobIAMPolicy) ToJobIAMPolicyOutput added in v4.4.0

func (i *JobIAMPolicy) ToJobIAMPolicyOutput() JobIAMPolicyOutput

func (*JobIAMPolicy) ToJobIAMPolicyOutputWithContext added in v4.4.0

func (i *JobIAMPolicy) ToJobIAMPolicyOutputWithContext(ctx context.Context) JobIAMPolicyOutput

func (*JobIAMPolicy) ToJobIAMPolicyPtrOutput added in v4.11.1

func (i *JobIAMPolicy) ToJobIAMPolicyPtrOutput() JobIAMPolicyPtrOutput

func (*JobIAMPolicy) ToJobIAMPolicyPtrOutputWithContext added in v4.11.1

func (i *JobIAMPolicy) ToJobIAMPolicyPtrOutputWithContext(ctx context.Context) JobIAMPolicyPtrOutput

type JobIAMPolicyArgs

type JobIAMPolicyArgs struct {
	JobId pulumi.StringInput
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a JobIAMPolicy resource.

func (JobIAMPolicyArgs) ElementType

func (JobIAMPolicyArgs) ElementType() reflect.Type

type JobIAMPolicyArray added in v4.11.1

type JobIAMPolicyArray []JobIAMPolicyInput

func (JobIAMPolicyArray) ElementType added in v4.11.1

func (JobIAMPolicyArray) ElementType() reflect.Type

func (JobIAMPolicyArray) ToJobIAMPolicyArrayOutput added in v4.11.1

func (i JobIAMPolicyArray) ToJobIAMPolicyArrayOutput() JobIAMPolicyArrayOutput

func (JobIAMPolicyArray) ToJobIAMPolicyArrayOutputWithContext added in v4.11.1

func (i JobIAMPolicyArray) ToJobIAMPolicyArrayOutputWithContext(ctx context.Context) JobIAMPolicyArrayOutput

type JobIAMPolicyArrayInput added in v4.11.1

type JobIAMPolicyArrayInput interface {
	pulumi.Input

	ToJobIAMPolicyArrayOutput() JobIAMPolicyArrayOutput
	ToJobIAMPolicyArrayOutputWithContext(context.Context) JobIAMPolicyArrayOutput
}

JobIAMPolicyArrayInput is an input type that accepts JobIAMPolicyArray and JobIAMPolicyArrayOutput values. You can construct a concrete instance of `JobIAMPolicyArrayInput` via:

JobIAMPolicyArray{ JobIAMPolicyArgs{...} }

type JobIAMPolicyArrayOutput added in v4.11.1

type JobIAMPolicyArrayOutput struct{ *pulumi.OutputState }

func (JobIAMPolicyArrayOutput) ElementType added in v4.11.1

func (JobIAMPolicyArrayOutput) ElementType() reflect.Type

func (JobIAMPolicyArrayOutput) Index added in v4.11.1

func (JobIAMPolicyArrayOutput) ToJobIAMPolicyArrayOutput added in v4.11.1

func (o JobIAMPolicyArrayOutput) ToJobIAMPolicyArrayOutput() JobIAMPolicyArrayOutput

func (JobIAMPolicyArrayOutput) ToJobIAMPolicyArrayOutputWithContext added in v4.11.1

func (o JobIAMPolicyArrayOutput) ToJobIAMPolicyArrayOutputWithContext(ctx context.Context) JobIAMPolicyArrayOutput

type JobIAMPolicyInput added in v4.4.0

type JobIAMPolicyInput interface {
	pulumi.Input

	ToJobIAMPolicyOutput() JobIAMPolicyOutput
	ToJobIAMPolicyOutputWithContext(ctx context.Context) JobIAMPolicyOutput
}

type JobIAMPolicyMap added in v4.11.1

type JobIAMPolicyMap map[string]JobIAMPolicyInput

func (JobIAMPolicyMap) ElementType added in v4.11.1

func (JobIAMPolicyMap) ElementType() reflect.Type

func (JobIAMPolicyMap) ToJobIAMPolicyMapOutput added in v4.11.1

func (i JobIAMPolicyMap) ToJobIAMPolicyMapOutput() JobIAMPolicyMapOutput

func (JobIAMPolicyMap) ToJobIAMPolicyMapOutputWithContext added in v4.11.1

func (i JobIAMPolicyMap) ToJobIAMPolicyMapOutputWithContext(ctx context.Context) JobIAMPolicyMapOutput

type JobIAMPolicyMapInput added in v4.11.1

type JobIAMPolicyMapInput interface {
	pulumi.Input

	ToJobIAMPolicyMapOutput() JobIAMPolicyMapOutput
	ToJobIAMPolicyMapOutputWithContext(context.Context) JobIAMPolicyMapOutput
}

JobIAMPolicyMapInput is an input type that accepts JobIAMPolicyMap and JobIAMPolicyMapOutput values. You can construct a concrete instance of `JobIAMPolicyMapInput` via:

JobIAMPolicyMap{ "key": JobIAMPolicyArgs{...} }

type JobIAMPolicyMapOutput added in v4.11.1

type JobIAMPolicyMapOutput struct{ *pulumi.OutputState }

func (JobIAMPolicyMapOutput) ElementType added in v4.11.1

func (JobIAMPolicyMapOutput) ElementType() reflect.Type

func (JobIAMPolicyMapOutput) MapIndex added in v4.11.1

func (JobIAMPolicyMapOutput) ToJobIAMPolicyMapOutput added in v4.11.1

func (o JobIAMPolicyMapOutput) ToJobIAMPolicyMapOutput() JobIAMPolicyMapOutput

func (JobIAMPolicyMapOutput) ToJobIAMPolicyMapOutputWithContext added in v4.11.1

func (o JobIAMPolicyMapOutput) ToJobIAMPolicyMapOutputWithContext(ctx context.Context) JobIAMPolicyMapOutput

type JobIAMPolicyOutput added in v4.4.0

type JobIAMPolicyOutput struct {
	*pulumi.OutputState
}

func (JobIAMPolicyOutput) ElementType added in v4.4.0

func (JobIAMPolicyOutput) ElementType() reflect.Type

func (JobIAMPolicyOutput) ToJobIAMPolicyOutput added in v4.4.0

func (o JobIAMPolicyOutput) ToJobIAMPolicyOutput() JobIAMPolicyOutput

func (JobIAMPolicyOutput) ToJobIAMPolicyOutputWithContext added in v4.4.0

func (o JobIAMPolicyOutput) ToJobIAMPolicyOutputWithContext(ctx context.Context) JobIAMPolicyOutput

func (JobIAMPolicyOutput) ToJobIAMPolicyPtrOutput added in v4.11.1

func (o JobIAMPolicyOutput) ToJobIAMPolicyPtrOutput() JobIAMPolicyPtrOutput

func (JobIAMPolicyOutput) ToJobIAMPolicyPtrOutputWithContext added in v4.11.1

func (o JobIAMPolicyOutput) ToJobIAMPolicyPtrOutputWithContext(ctx context.Context) JobIAMPolicyPtrOutput

type JobIAMPolicyPtrInput added in v4.11.1

type JobIAMPolicyPtrInput interface {
	pulumi.Input

	ToJobIAMPolicyPtrOutput() JobIAMPolicyPtrOutput
	ToJobIAMPolicyPtrOutputWithContext(ctx context.Context) JobIAMPolicyPtrOutput
}

type JobIAMPolicyPtrOutput added in v4.11.1

type JobIAMPolicyPtrOutput struct {
	*pulumi.OutputState
}

func (JobIAMPolicyPtrOutput) ElementType added in v4.11.1

func (JobIAMPolicyPtrOutput) ElementType() reflect.Type

func (JobIAMPolicyPtrOutput) ToJobIAMPolicyPtrOutput added in v4.11.1

func (o JobIAMPolicyPtrOutput) ToJobIAMPolicyPtrOutput() JobIAMPolicyPtrOutput

func (JobIAMPolicyPtrOutput) ToJobIAMPolicyPtrOutputWithContext added in v4.11.1

func (o JobIAMPolicyPtrOutput) ToJobIAMPolicyPtrOutputWithContext(ctx context.Context) JobIAMPolicyPtrOutput

type JobIAMPolicyState

type JobIAMPolicyState struct {
	// (Computed) The etag of the job's IAM policy.
	Etag  pulumi.StringPtrInput
	JobId pulumi.StringPtrInput
	// The policy data generated by an `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringPtrInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}
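
As a hedged sketch (not taken from the package docs), the corresponding `JobIAMPolicyArgs` fields are typically populated from the `organizations.LookupIAMPolicy` invoke; the job ID and region below are placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/organizations"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Build a policy document with the organizations.getIAMPolicy data source.
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				{
					Role:    "roles/editor",
					Members: []string{"user:jane@example.com"},
				},
			},
		})
		if err != nil {
			return err
		}
		// Attach the policy to an existing job; the job ID and region are placeholders.
		_, err = dataproc.NewJobIAMPolicy(ctx, "editor", &dataproc.JobIAMPolicyArgs{
			JobId:      pulumi.String("my-job-id"),
			Region:     pulumi.String("us-central1"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		return err
	})
}
```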

func (JobIAMPolicyState) ElementType

func (JobIAMPolicyState) ElementType() reflect.Type

type JobInput added in v4.4.0

type JobInput interface {
	pulumi.Input

	ToJobOutput() JobOutput
	ToJobOutputWithContext(ctx context.Context) JobOutput
}

type JobMap added in v4.11.1

type JobMap map[string]JobInput

func (JobMap) ElementType added in v4.11.1

func (JobMap) ElementType() reflect.Type

func (JobMap) ToJobMapOutput added in v4.11.1

func (i JobMap) ToJobMapOutput() JobMapOutput

func (JobMap) ToJobMapOutputWithContext added in v4.11.1

func (i JobMap) ToJobMapOutputWithContext(ctx context.Context) JobMapOutput

type JobMapInput added in v4.11.1

type JobMapInput interface {
	pulumi.Input

	ToJobMapOutput() JobMapOutput
	ToJobMapOutputWithContext(context.Context) JobMapOutput
}

JobMapInput is an input type that accepts JobMap and JobMapOutput values. You can construct a concrete instance of `JobMapInput` via:

JobMap{ "key": JobArgs{...} }

type JobMapOutput added in v4.11.1

type JobMapOutput struct{ *pulumi.OutputState }

func (JobMapOutput) ElementType added in v4.11.1

func (JobMapOutput) ElementType() reflect.Type

func (JobMapOutput) MapIndex added in v4.11.1

func (o JobMapOutput) MapIndex(k pulumi.StringInput) JobOutput

func (JobMapOutput) ToJobMapOutput added in v4.11.1

func (o JobMapOutput) ToJobMapOutput() JobMapOutput

func (JobMapOutput) ToJobMapOutputWithContext added in v4.11.1

func (o JobMapOutput) ToJobMapOutputWithContext(ctx context.Context) JobMapOutput

type JobOutput added in v4.4.0

type JobOutput struct {
	*pulumi.OutputState
}

func (JobOutput) ElementType added in v4.4.0

func (JobOutput) ElementType() reflect.Type

func (JobOutput) ToJobOutput added in v4.4.0

func (o JobOutput) ToJobOutput() JobOutput

func (JobOutput) ToJobOutputWithContext added in v4.4.0

func (o JobOutput) ToJobOutputWithContext(ctx context.Context) JobOutput

func (JobOutput) ToJobPtrOutput added in v4.11.1

func (o JobOutput) ToJobPtrOutput() JobPtrOutput

func (JobOutput) ToJobPtrOutputWithContext added in v4.11.1

func (o JobOutput) ToJobPtrOutputWithContext(ctx context.Context) JobPtrOutput

type JobPigConfig

type JobPigConfig struct {
	// Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                   `pulumi:"jarFileUris"`
	LoggingConfig *JobPigConfigLoggingConfig `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryList`
	QueryFileUri *string `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`
	QueryLists []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}
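
A minimal sketch of how a `JobPigConfig` is passed through the `PigConfig` field of `dataproc.NewJob`; the cluster name, query, and log level below are illustrative placeholders, not values from this package's docs:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Submit a Pig job to an existing cluster; names and queries are placeholders.
		_, err := dataproc.NewJob(ctx, "pig", &dataproc.JobArgs{
			Region:      pulumi.String("us-central1"),
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			PigConfig: &dataproc.JobPigConfigArgs{
				ContinueOnFailure: pulumi.Bool(true),
				QueryLists: pulumi.StringArray{
					pulumi.String("LOAD '/tmp/data.tsv' AS (line);"),
				},
				// Keys are logger names, values are log levels.
				LoggingConfig: &dataproc.JobPigConfigLoggingConfigArgs{
					DriverLogLevels: pulumi.StringMap{
						"root": pulumi.String("INFO"),
					},
				},
			},
		})
		return err
	})
}
```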

type JobPigConfigArgs

type JobPigConfigArgs struct {
	// Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput           `pulumi:"jarFileUris"`
	LoggingConfig JobPigConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryList`
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`
	QueryLists pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

func (JobPigConfigArgs) ElementType

func (JobPigConfigArgs) ElementType() reflect.Type

func (JobPigConfigArgs) ToJobPigConfigOutput

func (i JobPigConfigArgs) ToJobPigConfigOutput() JobPigConfigOutput

func (JobPigConfigArgs) ToJobPigConfigOutputWithContext

func (i JobPigConfigArgs) ToJobPigConfigOutputWithContext(ctx context.Context) JobPigConfigOutput

func (JobPigConfigArgs) ToJobPigConfigPtrOutput

func (i JobPigConfigArgs) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigArgs) ToJobPigConfigPtrOutputWithContext

func (i JobPigConfigArgs) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPigConfigInput

type JobPigConfigInput interface {
	pulumi.Input

	ToJobPigConfigOutput() JobPigConfigOutput
	ToJobPigConfigOutputWithContext(context.Context) JobPigConfigOutput
}

JobPigConfigInput is an input type that accepts JobPigConfigArgs and JobPigConfigOutput values. You can construct a concrete instance of `JobPigConfigInput` via:

JobPigConfigArgs{...}

type JobPigConfigLoggingConfig

type JobPigConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobPigConfigLoggingConfigArgs

type JobPigConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobPigConfigLoggingConfigArgs) ElementType

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutput

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutputWithContext

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutput

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigInput

type JobPigConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput
	ToJobPigConfigLoggingConfigOutputWithContext(context.Context) JobPigConfigLoggingConfigOutput
}

JobPigConfigLoggingConfigInput is an input type that accepts JobPigConfigLoggingConfigArgs and JobPigConfigLoggingConfigOutput values. You can construct a concrete instance of `JobPigConfigLoggingConfigInput` via:

JobPigConfigLoggingConfigArgs{...}

type JobPigConfigLoggingConfigOutput

type JobPigConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobPigConfigLoggingConfigOutput) DriverLogLevels

func (JobPigConfigLoggingConfigOutput) ElementType

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutput

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutputWithContext

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutput

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigPtrInput

type JobPigConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput
	ToJobPigConfigLoggingConfigPtrOutputWithContext(context.Context) JobPigConfigLoggingConfigPtrOutput
}

JobPigConfigLoggingConfigPtrInput is an input type that accepts JobPigConfigLoggingConfigArgs, JobPigConfigLoggingConfigPtr and JobPigConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobPigConfigLoggingConfigPtrInput` via:

        JobPigConfigLoggingConfigArgs{...}

or:

        nil

type JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPigConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobPigConfigLoggingConfigPtrOutput) Elem

func (JobPigConfigLoggingConfigPtrOutput) ElementType

func (JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutput

func (o JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (o JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigOutput

type JobPigConfigOutput struct{ *pulumi.OutputState }

func (JobPigConfigOutput) ContinueOnFailure

func (o JobPigConfigOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.

func (JobPigConfigOutput) ElementType

func (JobPigConfigOutput) ElementType() reflect.Type

func (JobPigConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPigConfigOutput) LoggingConfig

func (JobPigConfigOutput) Properties

func (o JobPigConfigOutput) Properties() pulumi.StringMapOutput

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPigConfigOutput) QueryFileUri

func (o JobPigConfigOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryList`

func (JobPigConfigOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobPigConfigOutput) ScriptVariables

func (o JobPigConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobPigConfigOutput) ToJobPigConfigOutput

func (o JobPigConfigOutput) ToJobPigConfigOutput() JobPigConfigOutput

func (JobPigConfigOutput) ToJobPigConfigOutputWithContext

func (o JobPigConfigOutput) ToJobPigConfigOutputWithContext(ctx context.Context) JobPigConfigOutput

func (JobPigConfigOutput) ToJobPigConfigPtrOutput

func (o JobPigConfigOutput) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigOutput) ToJobPigConfigPtrOutputWithContext

func (o JobPigConfigOutput) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPigConfigPtrInput

type JobPigConfigPtrInput interface {
	pulumi.Input

	ToJobPigConfigPtrOutput() JobPigConfigPtrOutput
	ToJobPigConfigPtrOutputWithContext(context.Context) JobPigConfigPtrOutput
}

JobPigConfigPtrInput is an input type that accepts JobPigConfigArgs, JobPigConfigPtr and JobPigConfigPtrOutput values. You can construct a concrete instance of `JobPigConfigPtrInput` via:

        JobPigConfigArgs{...}

or:

        nil

type JobPigConfigPtrOutput

type JobPigConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPigConfigPtrOutput) ContinueOnFailure

func (o JobPigConfigPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. Setting this to true can be useful when executing independent parallel queries. Defaults to false.

func (JobPigConfigPtrOutput) Elem

func (JobPigConfigPtrOutput) ElementType

func (JobPigConfigPtrOutput) ElementType() reflect.Type

func (JobPigConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPigConfigPtrOutput) LoggingConfig

func (JobPigConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPigConfigPtrOutput) QueryFileUri

func (o JobPigConfigPtrOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryList`

func (JobPigConfigPtrOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobPigConfigPtrOutput) ScriptVariables

func (o JobPigConfigPtrOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobPigConfigPtrOutput) ToJobPigConfigPtrOutput

func (o JobPigConfigPtrOutput) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigPtrOutput) ToJobPigConfigPtrOutputWithContext

func (o JobPigConfigPtrOutput) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPlacement

type JobPlacement struct {
	ClusterName string  `pulumi:"clusterName"`
	ClusterUuid *string `pulumi:"clusterUuid"`
}

type JobPlacementArgs

type JobPlacementArgs struct {
	ClusterName pulumi.StringInput    `pulumi:"clusterName"`
	ClusterUuid pulumi.StringPtrInput `pulumi:"clusterUuid"`
}
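
A sketch of wiring `JobPlacementArgs` to a cluster managed in the same program; the minimal cluster and the Spark main class below are assumptions for illustration:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		// Point the job at the cluster created above via its Name output.
		_, err = dataproc.NewJob(ctx, "sparkpi", &dataproc.JobArgs{
			Region: cluster.Region,
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: cluster.Name,
			},
			SparkConfig: &dataproc.JobSparkConfigArgs{
				// Placeholder main class; see the JobSparkConfig example below for a fuller config.
				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
			},
		})
		return err
	})
}
```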

func (JobPlacementArgs) ElementType

func (JobPlacementArgs) ElementType() reflect.Type

func (JobPlacementArgs) ToJobPlacementOutput

func (i JobPlacementArgs) ToJobPlacementOutput() JobPlacementOutput

func (JobPlacementArgs) ToJobPlacementOutputWithContext

func (i JobPlacementArgs) ToJobPlacementOutputWithContext(ctx context.Context) JobPlacementOutput

func (JobPlacementArgs) ToJobPlacementPtrOutput

func (i JobPlacementArgs) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementArgs) ToJobPlacementPtrOutputWithContext

func (i JobPlacementArgs) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPlacementInput

type JobPlacementInput interface {
	pulumi.Input

	ToJobPlacementOutput() JobPlacementOutput
	ToJobPlacementOutputWithContext(context.Context) JobPlacementOutput
}

JobPlacementInput is an input type that accepts JobPlacementArgs and JobPlacementOutput values. You can construct a concrete instance of `JobPlacementInput` via:

JobPlacementArgs{...}

type JobPlacementOutput

type JobPlacementOutput struct{ *pulumi.OutputState }

func (JobPlacementOutput) ClusterName

func (o JobPlacementOutput) ClusterName() pulumi.StringOutput

func (JobPlacementOutput) ClusterUuid

func (o JobPlacementOutput) ClusterUuid() pulumi.StringPtrOutput

func (JobPlacementOutput) ElementType

func (JobPlacementOutput) ElementType() reflect.Type

func (JobPlacementOutput) ToJobPlacementOutput

func (o JobPlacementOutput) ToJobPlacementOutput() JobPlacementOutput

func (JobPlacementOutput) ToJobPlacementOutputWithContext

func (o JobPlacementOutput) ToJobPlacementOutputWithContext(ctx context.Context) JobPlacementOutput

func (JobPlacementOutput) ToJobPlacementPtrOutput

func (o JobPlacementOutput) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementOutput) ToJobPlacementPtrOutputWithContext

func (o JobPlacementOutput) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPlacementPtrInput

type JobPlacementPtrInput interface {
	pulumi.Input

	ToJobPlacementPtrOutput() JobPlacementPtrOutput
	ToJobPlacementPtrOutputWithContext(context.Context) JobPlacementPtrOutput
}

JobPlacementPtrInput is an input type that accepts JobPlacementArgs, JobPlacementPtr and JobPlacementPtrOutput values. You can construct a concrete instance of `JobPlacementPtrInput` via:

        JobPlacementArgs{...}

or:

        nil

type JobPlacementPtrOutput

type JobPlacementPtrOutput struct{ *pulumi.OutputState }

func (JobPlacementPtrOutput) ClusterName

func (JobPlacementPtrOutput) ClusterUuid

func (JobPlacementPtrOutput) Elem

func (JobPlacementPtrOutput) ElementType

func (JobPlacementPtrOutput) ElementType() reflect.Type

func (JobPlacementPtrOutput) ToJobPlacementPtrOutput

func (o JobPlacementPtrOutput) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementPtrOutput) ToJobPlacementPtrOutputWithContext

func (o JobPlacementPtrOutput) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPtrInput added in v4.11.1

type JobPtrInput interface {
	pulumi.Input

	ToJobPtrOutput() JobPtrOutput
	ToJobPtrOutputWithContext(ctx context.Context) JobPtrOutput
}

type JobPtrOutput added in v4.11.1

type JobPtrOutput struct {
	*pulumi.OutputState
}

func (JobPtrOutput) ElementType added in v4.11.1

func (JobPtrOutput) ElementType() reflect.Type

func (JobPtrOutput) ToJobPtrOutput added in v4.11.1

func (o JobPtrOutput) ToJobPtrOutput() JobPtrOutput

func (JobPtrOutput) ToJobPtrOutputWithContext added in v4.11.1

func (o JobPtrOutput) ToJobPtrOutputWithContext(ctx context.Context) JobPtrOutput

type JobPysparkConfig

type JobPysparkConfig struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                       `pulumi:"jarFileUris"`
	LoggingConfig *JobPysparkConfigLoggingConfig `pulumi:"loggingConfig"`
	// The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri string `pulumi:"mainPythonFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `pulumi:"pythonFileUris"`
}
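
A hedged sketch of a PySpark job submission using `JobPysparkConfigArgs`; the cluster name and script URI are placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Submit a PySpark job; the cluster name and script URI are placeholders.
		_, err := dataproc.NewJob(ctx, "pyspark", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			PysparkConfig: &dataproc.JobPysparkConfigArgs{
				MainPythonFileUri: pulumi.String("gs://my-bucket/hello_world.py"),
				Properties: pulumi.StringMap{
					"spark.logConf": pulumi.String("true"),
				},
			},
		})
		return err
	})
}
```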

type JobPysparkConfigArgs

type JobPysparkConfigArgs struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput               `pulumi:"jarFileUris"`
	LoggingConfig JobPysparkConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri pulumi.StringInput `pulumi:"mainPythonFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris pulumi.StringArrayInput `pulumi:"pythonFileUris"`
}

func (JobPysparkConfigArgs) ElementType

func (JobPysparkConfigArgs) ElementType() reflect.Type

func (JobPysparkConfigArgs) ToJobPysparkConfigOutput

func (i JobPysparkConfigArgs) ToJobPysparkConfigOutput() JobPysparkConfigOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigOutputWithContext

func (i JobPysparkConfigArgs) ToJobPysparkConfigOutputWithContext(ctx context.Context) JobPysparkConfigOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigPtrOutput

func (i JobPysparkConfigArgs) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigPtrOutputWithContext

func (i JobPysparkConfigArgs) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobPysparkConfigInput

type JobPysparkConfigInput interface {
	pulumi.Input

	ToJobPysparkConfigOutput() JobPysparkConfigOutput
	ToJobPysparkConfigOutputWithContext(context.Context) JobPysparkConfigOutput
}

JobPysparkConfigInput is an input type that accepts JobPysparkConfigArgs and JobPysparkConfigOutput values. You can construct a concrete instance of `JobPysparkConfigInput` via:

JobPysparkConfigArgs{...}

type JobPysparkConfigLoggingConfig

type JobPysparkConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobPysparkConfigLoggingConfigArgs

type JobPysparkConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobPysparkConfigLoggingConfigArgs) ElementType

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutput

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutputWithContext

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutput

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigInput

type JobPysparkConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput
	ToJobPysparkConfigLoggingConfigOutputWithContext(context.Context) JobPysparkConfigLoggingConfigOutput
}

JobPysparkConfigLoggingConfigInput is an input type that accepts JobPysparkConfigLoggingConfigArgs and JobPysparkConfigLoggingConfigOutput values. You can construct a concrete instance of `JobPysparkConfigLoggingConfigInput` via:

JobPysparkConfigLoggingConfigArgs{...}

type JobPysparkConfigLoggingConfigOutput

type JobPysparkConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigLoggingConfigOutput) DriverLogLevels

func (JobPysparkConfigLoggingConfigOutput) ElementType

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutput

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutputWithContext

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutput

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigPtrInput

type JobPysparkConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput
	ToJobPysparkConfigLoggingConfigPtrOutputWithContext(context.Context) JobPysparkConfigLoggingConfigPtrOutput
}

JobPysparkConfigLoggingConfigPtrInput is an input type that accepts JobPysparkConfigLoggingConfigArgs, JobPysparkConfigLoggingConfigPtr and JobPysparkConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobPysparkConfigLoggingConfigPtrInput` via:

        JobPysparkConfigLoggingConfigArgs{...}

or:

        nil

type JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobPysparkConfigLoggingConfigPtrOutput) Elem

func (JobPysparkConfigLoggingConfigPtrOutput) ElementType

func (JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutput

func (o JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (o JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigOutput

type JobPysparkConfigOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobPysparkConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobPysparkConfigOutput) ElementType

func (JobPysparkConfigOutput) ElementType() reflect.Type

func (JobPysparkConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobPysparkConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPysparkConfigOutput) LoggingConfig

func (JobPysparkConfigOutput) MainPythonFileUri

func (o JobPysparkConfigOutput) MainPythonFileUri() pulumi.StringOutput

The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (JobPysparkConfigOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPysparkConfigOutput) PythonFileUris

func (o JobPysparkConfigOutput) PythonFileUris() pulumi.StringArrayOutput

HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (JobPysparkConfigOutput) ToJobPysparkConfigOutput

func (o JobPysparkConfigOutput) ToJobPysparkConfigOutput() JobPysparkConfigOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigOutputWithContext

func (o JobPysparkConfigOutput) ToJobPysparkConfigOutputWithContext(ctx context.Context) JobPysparkConfigOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigPtrOutput

func (o JobPysparkConfigOutput) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigPtrOutputWithContext

func (o JobPysparkConfigOutput) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobPysparkConfigPtrInput

type JobPysparkConfigPtrInput interface {
	pulumi.Input

	ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput
	ToJobPysparkConfigPtrOutputWithContext(context.Context) JobPysparkConfigPtrOutput
}

JobPysparkConfigPtrInput is an input type that accepts JobPysparkConfigArgs, JobPysparkConfigPtr and JobPysparkConfigPtrOutput values. You can construct a concrete instance of `JobPysparkConfigPtrInput` via:

        JobPysparkConfigArgs{...}

or:

        nil

type JobPysparkConfigPtrOutput

type JobPysparkConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobPysparkConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobPysparkConfigPtrOutput) Elem

func (JobPysparkConfigPtrOutput) ElementType

func (JobPysparkConfigPtrOutput) ElementType() reflect.Type

func (JobPysparkConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobPysparkConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPysparkConfigPtrOutput) LoggingConfig

func (JobPysparkConfigPtrOutput) MainPythonFileUri

func (o JobPysparkConfigPtrOutput) MainPythonFileUri() pulumi.StringPtrOutput

The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (JobPysparkConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPysparkConfigPtrOutput) PythonFileUris

HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutput

func (o JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutputWithContext

func (o JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobReference

type JobReference struct {
	JobId *string `pulumi:"jobId"`
}

type JobReferenceArgs

type JobReferenceArgs struct {
	JobId pulumi.StringPtrInput `pulumi:"jobId"`
}
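
As an illustrative sketch (assuming the Job resource's `Reference` field, which mirrors this type), a custom job ID can be supplied instead of a generated one; all literal values are placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Give the job an explicit job ID instead of a generated one; values are placeholders.
		_, err := dataproc.NewJob(ctx, "withReference", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			Reference: &dataproc.JobReferenceArgs{
				JobId: pulumi.String("my-custom-job-id"),
			},
			SparksqlConfig: &dataproc.JobSparksqlConfigArgs{
				QueryLists: pulumi.StringArray{
					pulumi.String("SHOW DATABASES"),
				},
			},
		})
		return err
	})
}
```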

func (JobReferenceArgs) ElementType

func (JobReferenceArgs) ElementType() reflect.Type

func (JobReferenceArgs) ToJobReferenceOutput

func (i JobReferenceArgs) ToJobReferenceOutput() JobReferenceOutput

func (JobReferenceArgs) ToJobReferenceOutputWithContext

func (i JobReferenceArgs) ToJobReferenceOutputWithContext(ctx context.Context) JobReferenceOutput

func (JobReferenceArgs) ToJobReferencePtrOutput

func (i JobReferenceArgs) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferenceArgs) ToJobReferencePtrOutputWithContext

func (i JobReferenceArgs) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferenceInput

type JobReferenceInput interface {
	pulumi.Input

	ToJobReferenceOutput() JobReferenceOutput
	ToJobReferenceOutputWithContext(context.Context) JobReferenceOutput
}

JobReferenceInput is an input type that accepts JobReferenceArgs and JobReferenceOutput values. You can construct a concrete instance of `JobReferenceInput` via:

JobReferenceArgs{...}

type JobReferenceOutput

type JobReferenceOutput struct{ *pulumi.OutputState }

func (JobReferenceOutput) ElementType

func (JobReferenceOutput) ElementType() reflect.Type

func (JobReferenceOutput) JobId

func (JobReferenceOutput) ToJobReferenceOutput

func (o JobReferenceOutput) ToJobReferenceOutput() JobReferenceOutput

func (JobReferenceOutput) ToJobReferenceOutputWithContext

func (o JobReferenceOutput) ToJobReferenceOutputWithContext(ctx context.Context) JobReferenceOutput

func (JobReferenceOutput) ToJobReferencePtrOutput

func (o JobReferenceOutput) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferenceOutput) ToJobReferencePtrOutputWithContext

func (o JobReferenceOutput) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferencePtrInput

type JobReferencePtrInput interface {
	pulumi.Input

	ToJobReferencePtrOutput() JobReferencePtrOutput
	ToJobReferencePtrOutputWithContext(context.Context) JobReferencePtrOutput
}

JobReferencePtrInput is an input type that accepts JobReferenceArgs, JobReferencePtr and JobReferencePtrOutput values. You can construct a concrete instance of `JobReferencePtrInput` via:

        JobReferenceArgs{...}

or:

        nil

type JobReferencePtrOutput

type JobReferencePtrOutput struct{ *pulumi.OutputState }

func (JobReferencePtrOutput) Elem

func (JobReferencePtrOutput) ElementType

func (JobReferencePtrOutput) ElementType() reflect.Type

func (JobReferencePtrOutput) JobId

func (JobReferencePtrOutput) ToJobReferencePtrOutput

func (o JobReferencePtrOutput) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferencePtrOutput) ToJobReferencePtrOutputWithContext

func (o JobReferencePtrOutput) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobScheduling

type JobScheduling struct {
	MaxFailuresPerHour int `pulumi:"maxFailuresPerHour"`
	MaxFailuresTotal   int `pulumi:"maxFailuresTotal"`
}

type JobSchedulingArgs

type JobSchedulingArgs struct {
	MaxFailuresPerHour pulumi.IntInput `pulumi:"maxFailuresPerHour"`
	MaxFailuresTotal   pulumi.IntInput `pulumi:"maxFailuresTotal"`
}
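
A brief sketch of attaching `JobSchedulingArgs` to a job so the driver is restarted on failure; the limits and other values are placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Restart the driver on failure, up to the given per-hour and total limits.
		// Cluster name and limits are placeholder values.
		_, err := dataproc.NewJob(ctx, "restartable", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			Scheduling: &dataproc.JobSchedulingArgs{
				MaxFailuresPerHour: pulumi.Int(1),
				MaxFailuresTotal:   pulumi.Int(5),
			},
			PysparkConfig: &dataproc.JobPysparkConfigArgs{
				MainPythonFileUri: pulumi.String("gs://my-bucket/hello_world.py"),
			},
		})
		return err
	})
}
```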

func (JobSchedulingArgs) ElementType

func (JobSchedulingArgs) ElementType() reflect.Type

func (JobSchedulingArgs) ToJobSchedulingOutput

func (i JobSchedulingArgs) ToJobSchedulingOutput() JobSchedulingOutput

func (JobSchedulingArgs) ToJobSchedulingOutputWithContext

func (i JobSchedulingArgs) ToJobSchedulingOutputWithContext(ctx context.Context) JobSchedulingOutput

func (JobSchedulingArgs) ToJobSchedulingPtrOutput

func (i JobSchedulingArgs) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingArgs) ToJobSchedulingPtrOutputWithContext

func (i JobSchedulingArgs) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingInput

type JobSchedulingInput interface {
	pulumi.Input

	ToJobSchedulingOutput() JobSchedulingOutput
	ToJobSchedulingOutputWithContext(context.Context) JobSchedulingOutput
}

JobSchedulingInput is an input type that accepts JobSchedulingArgs and JobSchedulingOutput values. You can construct a concrete instance of `JobSchedulingInput` via:

JobSchedulingArgs{...}

type JobSchedulingOutput

type JobSchedulingOutput struct{ *pulumi.OutputState }

func (JobSchedulingOutput) ElementType

func (JobSchedulingOutput) ElementType() reflect.Type

func (JobSchedulingOutput) MaxFailuresPerHour

func (o JobSchedulingOutput) MaxFailuresPerHour() pulumi.IntOutput

func (JobSchedulingOutput) MaxFailuresTotal added in v4.13.0

func (o JobSchedulingOutput) MaxFailuresTotal() pulumi.IntOutput

func (JobSchedulingOutput) ToJobSchedulingOutput

func (o JobSchedulingOutput) ToJobSchedulingOutput() JobSchedulingOutput

func (JobSchedulingOutput) ToJobSchedulingOutputWithContext

func (o JobSchedulingOutput) ToJobSchedulingOutputWithContext(ctx context.Context) JobSchedulingOutput

func (JobSchedulingOutput) ToJobSchedulingPtrOutput

func (o JobSchedulingOutput) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingOutput) ToJobSchedulingPtrOutputWithContext

func (o JobSchedulingOutput) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingPtrInput

type JobSchedulingPtrInput interface {
	pulumi.Input

	ToJobSchedulingPtrOutput() JobSchedulingPtrOutput
	ToJobSchedulingPtrOutputWithContext(context.Context) JobSchedulingPtrOutput
}

JobSchedulingPtrInput is an input type that accepts JobSchedulingArgs, JobSchedulingPtr and JobSchedulingPtrOutput values. You can construct a concrete instance of `JobSchedulingPtrInput` via:

        JobSchedulingArgs{...}

or:

        nil

type JobSchedulingPtrOutput

type JobSchedulingPtrOutput struct{ *pulumi.OutputState }

func (JobSchedulingPtrOutput) Elem

func (JobSchedulingPtrOutput) ElementType

func (JobSchedulingPtrOutput) ElementType() reflect.Type

func (JobSchedulingPtrOutput) MaxFailuresPerHour

func (o JobSchedulingPtrOutput) MaxFailuresPerHour() pulumi.IntPtrOutput

func (JobSchedulingPtrOutput) MaxFailuresTotal added in v4.13.0

func (o JobSchedulingPtrOutput) MaxFailuresTotal() pulumi.IntPtrOutput

func (JobSchedulingPtrOutput) ToJobSchedulingPtrOutput

func (o JobSchedulingPtrOutput) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingPtrOutput) ToJobSchedulingPtrOutputWithContext

func (o JobSchedulingPtrOutput) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSparkConfig

type JobSparkConfig struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                     `pulumi:"jarFileUris"`
	LoggingConfig *JobSparkConfigLoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
}
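
A sketch of a Spark job built from `JobSparkConfigArgs`, running the SparkPi example class from the examples jar shipped on Dataproc images; the cluster name is a placeholder:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Run SparkPi from the example jar on the cluster image; the cluster name is a placeholder.
		_, err := dataproc.NewJob(ctx, "spark", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			SparkConfig: &dataproc.JobSparkConfigArgs{
				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
				Args: pulumi.StringArray{
					pulumi.String("1000"),
				},
				Properties: pulumi.StringMap{
					"spark.logConf": pulumi.String("true"),
				},
			},
		})
		return err
	})
}
```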

type JobSparkConfigArgs

type JobSparkConfigArgs struct {
	// HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput             `pulumi:"jarFileUris"`
	LoggingConfig JobSparkConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

func (JobSparkConfigArgs) ElementType

func (JobSparkConfigArgs) ElementType() reflect.Type

func (JobSparkConfigArgs) ToJobSparkConfigOutput

func (i JobSparkConfigArgs) ToJobSparkConfigOutput() JobSparkConfigOutput

func (JobSparkConfigArgs) ToJobSparkConfigOutputWithContext

func (i JobSparkConfigArgs) ToJobSparkConfigOutputWithContext(ctx context.Context) JobSparkConfigOutput

func (JobSparkConfigArgs) ToJobSparkConfigPtrOutput

func (i JobSparkConfigArgs) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigArgs) ToJobSparkConfigPtrOutputWithContext

func (i JobSparkConfigArgs) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparkConfigInput

type JobSparkConfigInput interface {
	pulumi.Input

	ToJobSparkConfigOutput() JobSparkConfigOutput
	ToJobSparkConfigOutputWithContext(context.Context) JobSparkConfigOutput
}

JobSparkConfigInput is an input type that accepts JobSparkConfigArgs and JobSparkConfigOutput values. You can construct a concrete instance of `JobSparkConfigInput` via:

JobSparkConfigArgs{...}

type JobSparkConfigLoggingConfig

type JobSparkConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobSparkConfigLoggingConfigArgs

type JobSparkConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobSparkConfigLoggingConfigArgs) ElementType

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutput

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutputWithContext

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutput

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigInput

type JobSparkConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput
	ToJobSparkConfigLoggingConfigOutputWithContext(context.Context) JobSparkConfigLoggingConfigOutput
}

JobSparkConfigLoggingConfigInput is an input type that accepts JobSparkConfigLoggingConfigArgs and JobSparkConfigLoggingConfigOutput values. You can construct a concrete instance of `JobSparkConfigLoggingConfigInput` via:

JobSparkConfigLoggingConfigArgs{...}

type JobSparkConfigLoggingConfigOutput

type JobSparkConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobSparkConfigLoggingConfigOutput) DriverLogLevels

func (JobSparkConfigLoggingConfigOutput) ElementType

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutput

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutputWithContext

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutput

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigPtrInput

type JobSparkConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput
	ToJobSparkConfigLoggingConfigPtrOutputWithContext(context.Context) JobSparkConfigLoggingConfigPtrOutput
}

JobSparkConfigLoggingConfigPtrInput is an input type that accepts JobSparkConfigLoggingConfigArgs, JobSparkConfigLoggingConfigPtr and JobSparkConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobSparkConfigLoggingConfigPtrInput` via:

        JobSparkConfigLoggingConfigArgs{...}

or:

        nil

type JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparkConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobSparkConfigLoggingConfigPtrOutput) Elem

func (JobSparkConfigLoggingConfigPtrOutput) ElementType

func (JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutput

func (o JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (o JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigOutput

type JobSparkConfigOutput struct{ *pulumi.OutputState }

func (JobSparkConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobSparkConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobSparkConfigOutput) ElementType

func (JobSparkConfigOutput) ElementType() reflect.Type

func (JobSparkConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobSparkConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparkConfigOutput) LoggingConfig

func (JobSparkConfigOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`

func (JobSparkConfigOutput) MainJarFileUri

func (o JobSparkConfigOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobSparkConfigOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparkConfigOutput) ToJobSparkConfigOutput

func (o JobSparkConfigOutput) ToJobSparkConfigOutput() JobSparkConfigOutput

func (JobSparkConfigOutput) ToJobSparkConfigOutputWithContext

func (o JobSparkConfigOutput) ToJobSparkConfigOutputWithContext(ctx context.Context) JobSparkConfigOutput

func (JobSparkConfigOutput) ToJobSparkConfigPtrOutput

func (o JobSparkConfigOutput) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigOutput) ToJobSparkConfigPtrOutputWithContext

func (o JobSparkConfigOutput) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparkConfigPtrInput

type JobSparkConfigPtrInput interface {
	pulumi.Input

	ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput
	ToJobSparkConfigPtrOutputWithContext(context.Context) JobSparkConfigPtrOutput
}

JobSparkConfigPtrInput is an input type that accepts JobSparkConfigArgs, JobSparkConfigPtr and JobSparkConfigPtrOutput values. You can construct a concrete instance of `JobSparkConfigPtrInput` via:

        JobSparkConfigArgs{...}

or:

        nil

type JobSparkConfigPtrOutput

type JobSparkConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparkConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobSparkConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobSparkConfigPtrOutput) Elem

func (JobSparkConfigPtrOutput) ElementType

func (JobSparkConfigPtrOutput) ElementType() reflect.Type

func (JobSparkConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobSparkConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparkConfigPtrOutput) LoggingConfig

func (JobSparkConfigPtrOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`

func (JobSparkConfigPtrOutput) MainJarFileUri

func (o JobSparkConfigPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobSparkConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutput

func (o JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutputWithContext

func (o JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparksqlConfig

type JobSparksqlConfig struct {
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                        `pulumi:"jarFileUris"`
	LoggingConfig *JobSparksqlConfigLoggingConfig `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryList`
	QueryFileUri *string `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`
	QueryLists []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}
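
A hedged sketch of a Spark SQL job using `JobSparksqlConfigArgs` with inline queries and script variables; the cluster name, table, and variable are placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Run ad-hoc Spark SQL statements; all literal values are placeholders.
		_, err := dataproc.NewJob(ctx, "sparksql", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			SparksqlConfig: &dataproc.JobSparksqlConfigArgs{
				QueryLists: pulumi.StringArray{
					pulumi.String("DROP TABLE IF EXISTS tmp_table"),
					pulumi.String("CREATE TABLE tmp_table (name STRING, value INT)"),
					pulumi.String("SELECT * FROM tmp_table"),
				},
				// Made available to the queries as if set via `SET name="value";`.
				ScriptVariables: pulumi.StringMap{
					"threshold": pulumi.String("10"),
				},
			},
		})
		return err
	})
}
```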

type JobSparksqlConfigArgs

type JobSparksqlConfigArgs struct {
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput                `pulumi:"jarFileUris"`
	LoggingConfig JobSparksqlConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryList`
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`
	QueryLists pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

func (JobSparksqlConfigArgs) ElementType

func (JobSparksqlConfigArgs) ElementType() reflect.Type

func (JobSparksqlConfigArgs) ToJobSparksqlConfigOutput

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigOutput() JobSparksqlConfigOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigOutputWithContext

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigOutputWithContext(ctx context.Context) JobSparksqlConfigOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutput

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutputWithContext

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobSparksqlConfigInput

type JobSparksqlConfigInput interface {
	pulumi.Input

	ToJobSparksqlConfigOutput() JobSparksqlConfigOutput
	ToJobSparksqlConfigOutputWithContext(context.Context) JobSparksqlConfigOutput
}

JobSparksqlConfigInput is an input type that accepts JobSparksqlConfigArgs and JobSparksqlConfigOutput values. You can construct a concrete instance of `JobSparksqlConfigInput` via:

JobSparksqlConfigArgs{...}

type JobSparksqlConfigLoggingConfig

type JobSparksqlConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobSparksqlConfigLoggingConfigArgs

type JobSparksqlConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobSparksqlConfigLoggingConfigArgs) ElementType

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutput

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutputWithContext

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutput

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigInput

type JobSparksqlConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput
	ToJobSparksqlConfigLoggingConfigOutputWithContext(context.Context) JobSparksqlConfigLoggingConfigOutput
}

JobSparksqlConfigLoggingConfigInput is an input type that accepts JobSparksqlConfigLoggingConfigArgs and JobSparksqlConfigLoggingConfigOutput values. You can construct a concrete instance of `JobSparksqlConfigLoggingConfigInput` via:

JobSparksqlConfigLoggingConfigArgs{...}

type JobSparksqlConfigLoggingConfigOutput

type JobSparksqlConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigLoggingConfigOutput) DriverLogLevels

func (JobSparksqlConfigLoggingConfigOutput) ElementType

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutput

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutputWithContext

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutput

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigPtrInput

type JobSparksqlConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput
	ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(context.Context) JobSparksqlConfigLoggingConfigPtrOutput
}

JobSparksqlConfigLoggingConfigPtrInput is an input type that accepts JobSparksqlConfigLoggingConfigArgs, JobSparksqlConfigLoggingConfigPtr and JobSparksqlConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobSparksqlConfigLoggingConfigPtrInput` via:

        JobSparksqlConfigLoggingConfigArgs{...}

or:

        nil

type JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobSparksqlConfigLoggingConfigPtrOutput) Elem

func (JobSparksqlConfigLoggingConfigPtrOutput) ElementType

func (JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutput

func (o JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (o JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigOutput

type JobSparksqlConfigOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigOutput) ElementType

func (JobSparksqlConfigOutput) ElementType() reflect.Type

func (JobSparksqlConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparksqlConfigOutput) LoggingConfig

func (JobSparksqlConfigOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparksqlConfigOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`

func (JobSparksqlConfigOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobSparksqlConfigOutput) ScriptVariables

func (o JobSparksqlConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobSparksqlConfigOutput) ToJobSparksqlConfigOutput

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigOutput() JobSparksqlConfigOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigOutputWithContext

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigOutputWithContext(ctx context.Context) JobSparksqlConfigOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutput

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutputWithContext

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobSparksqlConfigPtrInput

type JobSparksqlConfigPtrInput interface {
	pulumi.Input

	ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput
	ToJobSparksqlConfigPtrOutputWithContext(context.Context) JobSparksqlConfigPtrOutput
}

JobSparksqlConfigPtrInput is an input type that accepts JobSparksqlConfigArgs, JobSparksqlConfigPtr and JobSparksqlConfigPtrOutput values. You can construct a concrete instance of `JobSparksqlConfigPtrInput` via:

        JobSparksqlConfigArgs{...}

or:

        nil

type JobSparksqlConfigPtrOutput

type JobSparksqlConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigPtrOutput) Elem

func (JobSparksqlConfigPtrOutput) ElementType

func (JobSparksqlConfigPtrOutput) ElementType() reflect.Type

func (JobSparksqlConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparksqlConfigPtrOutput) LoggingConfig

func (JobSparksqlConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparksqlConfigPtrOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`

func (JobSparksqlConfigPtrOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobSparksqlConfigPtrOutput) ScriptVariables

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutput

func (o JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutputWithContext

func (o JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobState

type JobState struct {
	// If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
	DriverControlsFilesUri pulumi.StringPtrInput
	// A URI pointing to the location of the stdout of the job's driver program.
	DriverOutputResourceUri pulumi.StringPtrInput
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete pulumi.BoolPtrInput
	// The config of Hadoop job
	HadoopConfig JobHadoopConfigPtrInput
	// The config of hive job
	HiveConfig JobHiveConfigPtrInput
	// The list of labels (key/value pairs) to add to the job.
	Labels pulumi.StringMapInput
	// The config of pig job.
	PigConfig JobPigConfigPtrInput
	// The config of job placement.
	Placement JobPlacementPtrInput
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The config of pySpark job.
	PysparkConfig JobPysparkConfigPtrInput
	// The reference of the job
	Reference JobReferencePtrInput
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrInput
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingPtrInput
	// The config of the Spark job.
	SparkConfig JobSparkConfigPtrInput
	// The config of SparkSql job
	SparksqlConfig JobSparksqlConfigPtrInput
	// The status of the job.
	Statuses JobStatusArrayInput
}
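
JobState is the state shape accepted by the package's GetJob lookup, which follows the same Get pattern as GetAutoscalingPolicy and GetMetastoreService. A hedged sketch of fetching an existing job and exporting a couple of its outputs; the resource ID string is a placeholder:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an existing job by ID; pass a *JobState instead of nil to qualify the lookup.
		job, err := dataproc.GetJob(ctx, "existing", pulumi.ID("my-existing-job-id"), nil)
		if err != nil {
			return err
		}
		// Export the driver output location and the first entry in the job's status history.
		ctx.Export("driverOutputUri", job.DriverOutputResourceUri)
		ctx.Export("jobState", job.Statuses.Index(pulumi.Int(0)).State())
		return nil
	})
}

```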

func (JobState) ElementType

func (JobState) ElementType() reflect.Type

type JobStatus

type JobStatus struct {
	Details        *string `pulumi:"details"`
	State          *string `pulumi:"state"`
	StateStartTime *string `pulumi:"stateStartTime"`
	Substate       *string `pulumi:"substate"`
}

type JobStatusArgs

type JobStatusArgs struct {
	Details        pulumi.StringPtrInput `pulumi:"details"`
	State          pulumi.StringPtrInput `pulumi:"state"`
	StateStartTime pulumi.StringPtrInput `pulumi:"stateStartTime"`
	Substate       pulumi.StringPtrInput `pulumi:"substate"`
}

func (JobStatusArgs) ElementType

func (JobStatusArgs) ElementType() reflect.Type

func (JobStatusArgs) ToJobStatusOutput

func (i JobStatusArgs) ToJobStatusOutput() JobStatusOutput

func (JobStatusArgs) ToJobStatusOutputWithContext

func (i JobStatusArgs) ToJobStatusOutputWithContext(ctx context.Context) JobStatusOutput

type JobStatusArray

type JobStatusArray []JobStatusInput

func (JobStatusArray) ElementType

func (JobStatusArray) ElementType() reflect.Type

func (JobStatusArray) ToJobStatusArrayOutput

func (i JobStatusArray) ToJobStatusArrayOutput() JobStatusArrayOutput

func (JobStatusArray) ToJobStatusArrayOutputWithContext

func (i JobStatusArray) ToJobStatusArrayOutputWithContext(ctx context.Context) JobStatusArrayOutput

type JobStatusArrayInput

type JobStatusArrayInput interface {
	pulumi.Input

	ToJobStatusArrayOutput() JobStatusArrayOutput
	ToJobStatusArrayOutputWithContext(context.Context) JobStatusArrayOutput
}

JobStatusArrayInput is an input type that accepts JobStatusArray and JobStatusArrayOutput values. You can construct a concrete instance of `JobStatusArrayInput` via:

JobStatusArray{ JobStatusArgs{...} }

type JobStatusArrayOutput

type JobStatusArrayOutput struct{ *pulumi.OutputState }

func (JobStatusArrayOutput) ElementType

func (JobStatusArrayOutput) ElementType() reflect.Type

func (JobStatusArrayOutput) Index

func (JobStatusArrayOutput) ToJobStatusArrayOutput

func (o JobStatusArrayOutput) ToJobStatusArrayOutput() JobStatusArrayOutput

func (JobStatusArrayOutput) ToJobStatusArrayOutputWithContext

func (o JobStatusArrayOutput) ToJobStatusArrayOutputWithContext(ctx context.Context) JobStatusArrayOutput

type JobStatusInput

type JobStatusInput interface {
	pulumi.Input

	ToJobStatusOutput() JobStatusOutput
	ToJobStatusOutputWithContext(context.Context) JobStatusOutput
}

JobStatusInput is an input type that accepts JobStatusArgs and JobStatusOutput values. You can construct a concrete instance of `JobStatusInput` via:

JobStatusArgs{...}

type JobStatusOutput

type JobStatusOutput struct{ *pulumi.OutputState }

func (JobStatusOutput) Details

func (JobStatusOutput) ElementType

func (JobStatusOutput) ElementType() reflect.Type

func (JobStatusOutput) State

func (JobStatusOutput) StateStartTime

func (o JobStatusOutput) StateStartTime() pulumi.StringPtrOutput

func (JobStatusOutput) Substate

func (o JobStatusOutput) Substate() pulumi.StringPtrOutput

func (JobStatusOutput) ToJobStatusOutput

func (o JobStatusOutput) ToJobStatusOutput() JobStatusOutput

func (JobStatusOutput) ToJobStatusOutputWithContext

func (o JobStatusOutput) ToJobStatusOutputWithContext(ctx context.Context) JobStatusOutput

type MetastoreService added in v4.14.0

type MetastoreService struct {
	pulumi.CustomResourceState

	// A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
	ArtifactGcsUri pulumi.StringOutput `pulumi:"artifactGcsUri"`
	// The URI of the endpoint used to access the metastore service.
	EndpointUri pulumi.StringOutput `pulumi:"endpointUri"`
	// Configuration information specific to running Hive metastore software as the metastore service.
	// Structure is documented below.
	HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigPtrOutput `pulumi:"hiveMetastoreConfig"`
	// User-defined labels for the metastore service.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The location where the metastore service should reside.
	// The default value is `global`.
	Location pulumi.StringPtrOutput `pulumi:"location"`
	// The one hour maintenance window of the metastore service.
	// This specifies when the service can be restarted for maintenance purposes in UTC time.
	// Structure is documented below.
	MaintenanceWindow MetastoreServiceMaintenanceWindowPtrOutput `pulumi:"maintenanceWindow"`
	// The relative resource name of the metastore service.
	Name pulumi.StringOutput `pulumi:"name"`
	// The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:
	// "projects/{projectNumber}/global/networks/{network_id}".
	Network pulumi.StringOutput `pulumi:"network"`
	// The TCP port at which the metastore service is reached. Default: 9083.
	Port pulumi.IntOutput `pulumi:"port"`
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 63 characters.
	ServiceId pulumi.StringOutput `pulumi:"serviceId"`
	// The current state of the metastore service.
	State pulumi.StringOutput `pulumi:"state"`
	// Additional information about the current state of the metastore service, if available.
	StateMessage pulumi.StringOutput `pulumi:"stateMessage"`
	// The tier of the service.
	// Possible values are `DEVELOPER` and `ENTERPRISE`.
	Tier pulumi.StringOutput `pulumi:"tier"`
}

A managed metastore service that serves metadata queries.

## Example Usage ### Dataproc Metastore Service Basic

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
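		// NOTE: google_beta below is assumed to be a google-beta provider instance configured elsewhere in the program (not shown here).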
		_, err := dataproc.NewMetastoreService(ctx, "_default", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("metastore-srv"),
			Location:  pulumi.String("us-central1"),
			Port:      pulumi.Int(9080),
			Tier:      pulumi.String("DEVELOPER"),
			MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
				HourOfDay: pulumi.Int(2),
				DayOfWeek: pulumi.String("SUNDAY"),
			},
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("2.3.6"),
			},
		}, pulumi.Provider(google_beta))
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Service can be imported using any of these accepted formats

```sh

$ pulumi import gcp:dataproc/metastoreService:MetastoreService default projects/{{project}}/locations/{{location}}/services/{{service_id}}

```

```sh

$ pulumi import gcp:dataproc/metastoreService:MetastoreService default {{project}}/{{location}}/{{service_id}}

```

```sh

$ pulumi import gcp:dataproc/metastoreService:MetastoreService default {{location}}/{{service_id}}

```

func GetMetastoreService added in v4.14.0

func GetMetastoreService(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *MetastoreServiceState, opts ...pulumi.ResourceOption) (*MetastoreService, error)

GetMetastoreService gets an existing MetastoreService resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewMetastoreService added in v4.14.0

func NewMetastoreService(ctx *pulumi.Context,
	name string, args *MetastoreServiceArgs, opts ...pulumi.ResourceOption) (*MetastoreService, error)

NewMetastoreService registers a new resource with the given unique name, arguments, and options.

func (*MetastoreService) ElementType added in v4.14.0

func (*MetastoreService) ElementType() reflect.Type

func (*MetastoreService) ToMetastoreServiceOutput added in v4.14.0

func (i *MetastoreService) ToMetastoreServiceOutput() MetastoreServiceOutput

func (*MetastoreService) ToMetastoreServiceOutputWithContext added in v4.14.0

func (i *MetastoreService) ToMetastoreServiceOutputWithContext(ctx context.Context) MetastoreServiceOutput

func (*MetastoreService) ToMetastoreServicePtrOutput added in v4.14.0

func (i *MetastoreService) ToMetastoreServicePtrOutput() MetastoreServicePtrOutput

func (*MetastoreService) ToMetastoreServicePtrOutputWithContext added in v4.14.0

func (i *MetastoreService) ToMetastoreServicePtrOutputWithContext(ctx context.Context) MetastoreServicePtrOutput

type MetastoreServiceArgs added in v4.14.0

type MetastoreServiceArgs struct {
	// Configuration information specific to running Hive metastore software as the metastore service.
	// Structure is documented below.
	HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigPtrInput
	// User-defined labels for the metastore service.
	Labels pulumi.StringMapInput
	// The location where the metastore service should reside.
	// The default value is `global`.
	Location pulumi.StringPtrInput
	// The one hour maintenance window of the metastore service.
	// This specifies when the service can be restarted for maintenance purposes in UTC time.
	// Structure is documented below.
	MaintenanceWindow MetastoreServiceMaintenanceWindowPtrInput
	// The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:
	// "projects/{projectNumber}/global/networks/{network_id}".
	Network pulumi.StringPtrInput
	// The TCP port at which the metastore service is reached. Default: 9083.
	Port pulumi.IntPtrInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 63 characters.
	ServiceId pulumi.StringInput
	// The tier of the service.
	// Possible values are `DEVELOPER` and `ENTERPRISE`.
	Tier pulumi.StringPtrInput
}

The set of arguments for constructing a MetastoreService resource.

func (MetastoreServiceArgs) ElementType added in v4.14.0

func (MetastoreServiceArgs) ElementType() reflect.Type

type MetastoreServiceArray added in v4.14.0

type MetastoreServiceArray []MetastoreServiceInput

func (MetastoreServiceArray) ElementType added in v4.14.0

func (MetastoreServiceArray) ElementType() reflect.Type

func (MetastoreServiceArray) ToMetastoreServiceArrayOutput added in v4.14.0

func (i MetastoreServiceArray) ToMetastoreServiceArrayOutput() MetastoreServiceArrayOutput

func (MetastoreServiceArray) ToMetastoreServiceArrayOutputWithContext added in v4.14.0

func (i MetastoreServiceArray) ToMetastoreServiceArrayOutputWithContext(ctx context.Context) MetastoreServiceArrayOutput

type MetastoreServiceArrayInput added in v4.14.0

type MetastoreServiceArrayInput interface {
	pulumi.Input

	ToMetastoreServiceArrayOutput() MetastoreServiceArrayOutput
	ToMetastoreServiceArrayOutputWithContext(context.Context) MetastoreServiceArrayOutput
}

MetastoreServiceArrayInput is an input type that accepts MetastoreServiceArray and MetastoreServiceArrayOutput values. You can construct a concrete instance of `MetastoreServiceArrayInput` via:

MetastoreServiceArray{ MetastoreServiceArgs{...} }

type MetastoreServiceArrayOutput added in v4.14.0

type MetastoreServiceArrayOutput struct{ *pulumi.OutputState }

func (MetastoreServiceArrayOutput) ElementType added in v4.14.0

func (MetastoreServiceArrayOutput) Index added in v4.14.0

func (MetastoreServiceArrayOutput) ToMetastoreServiceArrayOutput added in v4.14.0

func (o MetastoreServiceArrayOutput) ToMetastoreServiceArrayOutput() MetastoreServiceArrayOutput

func (MetastoreServiceArrayOutput) ToMetastoreServiceArrayOutputWithContext added in v4.14.0

func (o MetastoreServiceArrayOutput) ToMetastoreServiceArrayOutputWithContext(ctx context.Context) MetastoreServiceArrayOutput

type MetastoreServiceHiveMetastoreConfig added in v4.14.0

type MetastoreServiceHiveMetastoreConfig struct {
	// A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml).
	// The mappings override system defaults (some keys cannot be overridden)
	ConfigOverrides map[string]string `pulumi:"configOverrides"`
	// Information used to configure the Hive metastore service as a service principal in a Kerberos realm.
	// Structure is documented below.
	KerberosConfig *MetastoreServiceHiveMetastoreConfigKerberosConfig `pulumi:"kerberosConfig"`
	// The Hive metastore schema version.
	Version string `pulumi:"version"`
}
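
ConfigOverrides applies individual hive-site.xml properties to the managed metastore. A hedged sketch follows; the service ID, location, Hive version, and the overridden key/value are placeholders, and explicit provider options (such as the google-beta provider used in the Example Usage above) are omitted for brevity.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "overridden", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("metastore-overrides"),
			Location:  pulumi.String("us-central1"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
				// hive-site.xml overrides; note that some keys cannot be overridden.
				ConfigOverrides: pulumi.StringMap{
					"hive.metastore.warehouse.dir": pulumi.String("gs://my-bucket/hive-warehouse"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```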

type MetastoreServiceHiveMetastoreConfigArgs added in v4.14.0

type MetastoreServiceHiveMetastoreConfigArgs struct {
	// A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml).
	// The mappings override system defaults (some keys cannot be overridden)
	ConfigOverrides pulumi.StringMapInput `pulumi:"configOverrides"`
	// Information used to configure the Hive metastore service as a service principal in a Kerberos realm.
	// Structure is documented below.
	KerberosConfig MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput `pulumi:"kerberosConfig"`
	// The Hive metastore schema version.
	Version pulumi.StringInput `pulumi:"version"`
}

func (MetastoreServiceHiveMetastoreConfigArgs) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigOutput added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigOutput() MetastoreServiceHiveMetastoreConfigOutput

func (MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigOutputWithContext added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigOutput

func (MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigPtrOutput added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigPtrOutput() MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigInput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigOutput() MetastoreServiceHiveMetastoreConfigOutput
	ToMetastoreServiceHiveMetastoreConfigOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigOutput
}

MetastoreServiceHiveMetastoreConfigInput is an input type that accepts MetastoreServiceHiveMetastoreConfigArgs and MetastoreServiceHiveMetastoreConfigOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigInput` via:

MetastoreServiceHiveMetastoreConfigArgs{...}

type MetastoreServiceHiveMetastoreConfigKerberosConfig added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfig struct {
	// A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC).
	// Structure is documented below.
	Keytab MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab `pulumi:"keytab"`
	// A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
	Krb5ConfigGcsUri string `pulumi:"krb5ConfigGcsUri"`
	// A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
	Principal string `pulumi:"principal"`
}
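
KerberosConfig attaches a service principal, a keytab stored as a Secret Manager secret version, and a krb5.conf location to the Hive metastore. A hedged sketch of how the block nests inside MetastoreServiceHiveMetastoreConfigArgs; the secret version name, bucket path, principal, and version are placeholders, and provider options are again omitted:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "kerberized", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("metastore-krb"),
			Location:  pulumi.String("us-central1"),
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("3.1.2"),
				KerberosConfig: &dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigArgs{
					// Keytab referenced as a Secret Manager secret version (placeholder name).
					Keytab: &dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs{
						CloudSecret: pulumi.String("projects/1234567890/secrets/hive-keytab/versions/1"),
					},
					Krb5ConfigGcsUri: pulumi.String("gs://my-bucket/krb5.conf"),
					Principal:        pulumi.String("hive/metastore@EXAMPLE.COM"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```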

type MetastoreServiceHiveMetastoreConfigKerberosConfigArgs added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigArgs struct {
	// A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC).
	// Structure is documented below.
	Keytab MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput `pulumi:"keytab"`
	// A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
	Krb5ConfigGcsUri pulumi.StringInput `pulumi:"krb5ConfigGcsUri"`
	// A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
	Principal pulumi.StringInput `pulumi:"principal"`
}

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutput added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigInput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigOutput
	ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigOutput
}

MetastoreServiceHiveMetastoreConfigKerberosConfigInput is an input type that accepts MetastoreServiceHiveMetastoreConfigKerberosConfigArgs and MetastoreServiceHiveMetastoreConfigKerberosConfigOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigKerberosConfigInput` via:

MetastoreServiceHiveMetastoreConfigKerberosConfigArgs{...}

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab struct {
	// The relative resource name of a Secret Manager secret version, in the following form:
	// "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
	CloudSecret string `pulumi:"cloudSecret"`
}

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs struct {
	// The relative resource name of a Secret Manager secret version, in the following form:
	// "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
	CloudSecret pulumi.StringInput `pulumi:"cloudSecret"`
}

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext added in v4.14.0

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput
	ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput
}

MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput is an input type that accepts MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs and MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput` via:

MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs{...}

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) CloudSecret added in v4.14.0

The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrInput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput
	ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput
}

MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrInput is an input type that accepts MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs, MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtr and MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrInput` via:

        MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs{...}

or:

        nil

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) CloudSecret added in v4.14.0

The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) Elem added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigOutput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) Keytab added in v4.14.0

A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) Krb5ConfigGcsUri added in v4.14.0

A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) Principal added in v4.14.0

A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutput added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput
	ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput
}

MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput is an input type that accepts MetastoreServiceHiveMetastoreConfigKerberosConfigArgs, MetastoreServiceHiveMetastoreConfigKerberosConfigPtr and MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput` via:

        MetastoreServiceHiveMetastoreConfigKerberosConfigArgs{...}

or:

        nil

type MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) Elem added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) Keytab added in v4.14.0

A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) Krb5ConfigGcsUri added in v4.14.0

A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) Principal added in v4.14.0

A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigOutput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigOutput) ConfigOverrides added in v4.14.0

A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)

func (MetastoreServiceHiveMetastoreConfigOutput) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigOutput) KerberosConfig added in v4.14.0

Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.

func (MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigOutput added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigOutput() MetastoreServiceHiveMetastoreConfigOutput

func (MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigOutput

func (MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutput added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutput() MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigOutput) Version added in v4.14.0

The Hive metastore schema version.

type MetastoreServiceHiveMetastoreConfigPtrInput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigPtrInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigPtrOutput() MetastoreServiceHiveMetastoreConfigPtrOutput
	ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigPtrOutput
}

MetastoreServiceHiveMetastoreConfigPtrInput is an input type that accepts MetastoreServiceHiveMetastoreConfigArgs, MetastoreServiceHiveMetastoreConfigPtr and MetastoreServiceHiveMetastoreConfigPtrOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigPtrInput` via:

        MetastoreServiceHiveMetastoreConfigArgs{...}

or:

        nil

type MetastoreServiceHiveMetastoreConfigPtrOutput added in v4.14.0

type MetastoreServiceHiveMetastoreConfigPtrOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigPtrOutput) ConfigOverrides added in v4.14.0

A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)

func (MetastoreServiceHiveMetastoreConfigPtrOutput) Elem added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigPtrOutput) ElementType added in v4.14.0

func (MetastoreServiceHiveMetastoreConfigPtrOutput) KerberosConfig added in v4.14.0

Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.

func (MetastoreServiceHiveMetastoreConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutput added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutput() MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext added in v4.14.0

func (o MetastoreServiceHiveMetastoreConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigPtrOutput) Version added in v4.14.0

The Hive metastore schema version.

type MetastoreServiceInput added in v4.14.0

type MetastoreServiceInput interface {
	pulumi.Input

	ToMetastoreServiceOutput() MetastoreServiceOutput
	ToMetastoreServiceOutputWithContext(ctx context.Context) MetastoreServiceOutput
}

type MetastoreServiceMaintenanceWindow added in v4.14.0

type MetastoreServiceMaintenanceWindow struct {
	// The day of the week when the window starts.
	// Possible values are `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`.
	DayOfWeek string `pulumi:"dayOfWeek"`
	// The hour of day (0-23) when the window starts.
	HourOfDay int `pulumi:"hourOfDay"`
}

type MetastoreServiceMaintenanceWindowArgs added in v4.14.0

type MetastoreServiceMaintenanceWindowArgs struct {
	// The day of the week when the window starts.
	// Possible values are `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`.
	DayOfWeek pulumi.StringInput `pulumi:"dayOfWeek"`
	// The hour of day (0-23) when the window starts.
	HourOfDay pulumi.IntInput `pulumi:"hourOfDay"`
}

func (MetastoreServiceMaintenanceWindowArgs) ElementType added in v4.14.0

func (MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowOutput added in v4.14.0

func (i MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowOutput() MetastoreServiceMaintenanceWindowOutput

func (MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowOutputWithContext added in v4.14.0

func (i MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowOutput

func (MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowPtrOutput added in v4.14.0

func (i MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowPtrOutput() MetastoreServiceMaintenanceWindowPtrOutput

func (MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext added in v4.14.0

func (i MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowPtrOutput

type MetastoreServiceMaintenanceWindowInput added in v4.14.0

type MetastoreServiceMaintenanceWindowInput interface {
	pulumi.Input

	ToMetastoreServiceMaintenanceWindowOutput() MetastoreServiceMaintenanceWindowOutput
	ToMetastoreServiceMaintenanceWindowOutputWithContext(context.Context) MetastoreServiceMaintenanceWindowOutput
}

MetastoreServiceMaintenanceWindowInput is an input type that accepts MetastoreServiceMaintenanceWindowArgs and MetastoreServiceMaintenanceWindowOutput values. You can construct a concrete instance of `MetastoreServiceMaintenanceWindowInput` via:

MetastoreServiceMaintenanceWindowArgs{...}

type MetastoreServiceMaintenanceWindowOutput added in v4.14.0

type MetastoreServiceMaintenanceWindowOutput struct{ *pulumi.OutputState }

func (MetastoreServiceMaintenanceWindowOutput) DayOfWeek added in v4.14.0

The day of the week when the window starts. Possible values are `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`.

func (MetastoreServiceMaintenanceWindowOutput) ElementType added in v4.14.0

func (MetastoreServiceMaintenanceWindowOutput) HourOfDay added in v4.14.0

The hour of day (0-23) when the window starts.

func (MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowOutput added in v4.14.0

func (o MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowOutput() MetastoreServiceMaintenanceWindowOutput

func (MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowOutputWithContext added in v4.14.0

func (o MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowOutput

func (MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowPtrOutput added in v4.14.0

func (o MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowPtrOutput() MetastoreServiceMaintenanceWindowPtrOutput

func (MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext added in v4.14.0

func (o MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowPtrOutput

type MetastoreServiceMaintenanceWindowPtrInput added in v4.14.0

type MetastoreServiceMaintenanceWindowPtrInput interface {
	pulumi.Input

	ToMetastoreServiceMaintenanceWindowPtrOutput() MetastoreServiceMaintenanceWindowPtrOutput
	ToMetastoreServiceMaintenanceWindowPtrOutputWithContext(context.Context) MetastoreServiceMaintenanceWindowPtrOutput
}

MetastoreServiceMaintenanceWindowPtrInput is an input type that accepts MetastoreServiceMaintenanceWindowArgs, MetastoreServiceMaintenanceWindowPtr and MetastoreServiceMaintenanceWindowPtrOutput values. You can construct a concrete instance of `MetastoreServiceMaintenanceWindowPtrInput` via:

        MetastoreServiceMaintenanceWindowArgs{...}

or:

        nil

type MetastoreServiceMaintenanceWindowPtrOutput added in v4.14.0

type MetastoreServiceMaintenanceWindowPtrOutput struct{ *pulumi.OutputState }

func (MetastoreServiceMaintenanceWindowPtrOutput) DayOfWeek added in v4.14.0

The day of the week when the window starts. Possible values are `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`.

func (MetastoreServiceMaintenanceWindowPtrOutput) Elem added in v4.14.0

func (MetastoreServiceMaintenanceWindowPtrOutput) ElementType added in v4.14.0

func (MetastoreServiceMaintenanceWindowPtrOutput) HourOfDay added in v4.14.0

The hour of day (0-23) when the window starts.

func (MetastoreServiceMaintenanceWindowPtrOutput) ToMetastoreServiceMaintenanceWindowPtrOutput added in v4.14.0

func (o MetastoreServiceMaintenanceWindowPtrOutput) ToMetastoreServiceMaintenanceWindowPtrOutput() MetastoreServiceMaintenanceWindowPtrOutput

func (MetastoreServiceMaintenanceWindowPtrOutput) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext added in v4.14.0

func (o MetastoreServiceMaintenanceWindowPtrOutput) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowPtrOutput

type MetastoreServiceMap added in v4.14.0

type MetastoreServiceMap map[string]MetastoreServiceInput

func (MetastoreServiceMap) ElementType added in v4.14.0

func (MetastoreServiceMap) ElementType() reflect.Type

func (MetastoreServiceMap) ToMetastoreServiceMapOutput added in v4.14.0

func (i MetastoreServiceMap) ToMetastoreServiceMapOutput() MetastoreServiceMapOutput

func (MetastoreServiceMap) ToMetastoreServiceMapOutputWithContext added in v4.14.0

func (i MetastoreServiceMap) ToMetastoreServiceMapOutputWithContext(ctx context.Context) MetastoreServiceMapOutput

type MetastoreServiceMapInput added in v4.14.0

type MetastoreServiceMapInput interface {
	pulumi.Input

	ToMetastoreServiceMapOutput() MetastoreServiceMapOutput
	ToMetastoreServiceMapOutputWithContext(context.Context) MetastoreServiceMapOutput
}

MetastoreServiceMapInput is an input type that accepts MetastoreServiceMap and MetastoreServiceMapOutput values. You can construct a concrete instance of `MetastoreServiceMapInput` via:

MetastoreServiceMap{ "key": MetastoreServiceArgs{...} }

type MetastoreServiceMapOutput added in v4.14.0

type MetastoreServiceMapOutput struct{ *pulumi.OutputState }

func (MetastoreServiceMapOutput) ElementType added in v4.14.0

func (MetastoreServiceMapOutput) ElementType() reflect.Type

func (MetastoreServiceMapOutput) MapIndex added in v4.14.0

func (MetastoreServiceMapOutput) ToMetastoreServiceMapOutput added in v4.14.0

func (o MetastoreServiceMapOutput) ToMetastoreServiceMapOutput() MetastoreServiceMapOutput

func (MetastoreServiceMapOutput) ToMetastoreServiceMapOutputWithContext added in v4.14.0

func (o MetastoreServiceMapOutput) ToMetastoreServiceMapOutputWithContext(ctx context.Context) MetastoreServiceMapOutput

type MetastoreServiceOutput added in v4.14.0

type MetastoreServiceOutput struct {
	*pulumi.OutputState
}

func (MetastoreServiceOutput) ElementType added in v4.14.0

func (MetastoreServiceOutput) ElementType() reflect.Type

func (MetastoreServiceOutput) ToMetastoreServiceOutput added in v4.14.0

func (o MetastoreServiceOutput) ToMetastoreServiceOutput() MetastoreServiceOutput

func (MetastoreServiceOutput) ToMetastoreServiceOutputWithContext added in v4.14.0

func (o MetastoreServiceOutput) ToMetastoreServiceOutputWithContext(ctx context.Context) MetastoreServiceOutput

func (MetastoreServiceOutput) ToMetastoreServicePtrOutput added in v4.14.0

func (o MetastoreServiceOutput) ToMetastoreServicePtrOutput() MetastoreServicePtrOutput

func (MetastoreServiceOutput) ToMetastoreServicePtrOutputWithContext added in v4.14.0

func (o MetastoreServiceOutput) ToMetastoreServicePtrOutputWithContext(ctx context.Context) MetastoreServicePtrOutput

type MetastoreServicePtrInput added in v4.14.0

type MetastoreServicePtrInput interface {
	pulumi.Input

	ToMetastoreServicePtrOutput() MetastoreServicePtrOutput
	ToMetastoreServicePtrOutputWithContext(ctx context.Context) MetastoreServicePtrOutput
}

type MetastoreServicePtrOutput added in v4.14.0

type MetastoreServicePtrOutput struct {
	*pulumi.OutputState
}

func (MetastoreServicePtrOutput) ElementType added in v4.14.0

func (MetastoreServicePtrOutput) ElementType() reflect.Type

func (MetastoreServicePtrOutput) ToMetastoreServicePtrOutput added in v4.14.0

func (o MetastoreServicePtrOutput) ToMetastoreServicePtrOutput() MetastoreServicePtrOutput

func (MetastoreServicePtrOutput) ToMetastoreServicePtrOutputWithContext added in v4.14.0

func (o MetastoreServicePtrOutput) ToMetastoreServicePtrOutputWithContext(ctx context.Context) MetastoreServicePtrOutput

type MetastoreServiceState added in v4.14.0

type MetastoreServiceState struct {
	// A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
	ArtifactGcsUri pulumi.StringPtrInput
	// The URI of the endpoint used to access the metastore service.
	EndpointUri pulumi.StringPtrInput
	// Configuration information specific to running Hive metastore software as the metastore service.
	// Structure is documented below.
	HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigPtrInput
	// User-defined labels for the metastore service.
	Labels pulumi.StringMapInput
	// The location where the metastore service should reside.
	// The default value is `global`.
	Location pulumi.StringPtrInput
	// The one hour maintenance window of the metastore service.
	// This specifies when the service can be restarted for maintenance purposes in UTC time.
	// Structure is documented below.
	MaintenanceWindow MetastoreServiceMaintenanceWindowPtrInput
	// The relative resource name of the metastore service.
	Name pulumi.StringPtrInput
	// The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:
	// "projects/{projectNumber}/global/networks/{network_id}".
	Network pulumi.StringPtrInput
	// The TCP port at which the metastore service is reached. Default: 9083.
	Port pulumi.IntPtrInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 63 characters.
	ServiceId pulumi.StringPtrInput
	// The current state of the metastore service.
	State pulumi.StringPtrInput
	// Additional information about the current state of the metastore service, if available.
	StateMessage pulumi.StringPtrInput
	// The tier of the service.
	// Possible values are `DEVELOPER` and `ENTERPRISE`.
	Tier pulumi.StringPtrInput
}

func (MetastoreServiceState) ElementType added in v4.14.0

func (MetastoreServiceState) ElementType() reflect.Type
