dataproc

package
v5.26.0
Published: Nov 1, 2021 License: Apache-2.0 Imports: 7 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AutoscalingPolicy

type AutoscalingPolicy struct {
	pulumi.CustomResourceState

	// Basic algorithm for autoscaling.
	// Structure is documented below.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrOutput `pulumi:"basicAlgorithm"`
	// The location where the autoscaling policy should reside.
	// The default value is `global`.
	Location pulumi.StringPtrOutput `pulumi:"location"`
	// The "resource name" of the autoscaling policy.
	Name pulumi.StringOutput `pulumi:"name"`
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 50 characters.
	PolicyId pulumi.StringOutput `pulumi:"policyId"`
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// Describes how the autoscaler will operate for secondary workers.
	// Structure is documented below.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrOutput `pulumi:"secondaryWorkerConfig"`
	// Describes how the autoscaler will operate for primary workers.
	// Structure is documented below.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrOutput `pulumi:"workerConfig"`
}

Describes an autoscaling policy for the Dataproc cluster autoscaler.

## Example Usage

### Dataproc Autoscaling Policy

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		asp, err := dataproc.NewAutoscalingPolicy(ctx, "asp", &dataproc.AutoscalingPolicyArgs{
			PolicyId: pulumi.String("dataproc-policy"),
			Location: pulumi.String("us-central1"),
			WorkerConfig: &dataproc.AutoscalingPolicyWorkerConfigArgs{
				MaxInstances: pulumi.Int(3),
			},
			BasicAlgorithm: &dataproc.AutoscalingPolicyBasicAlgorithmArgs{
				YarnConfig: &dataproc.AutoscalingPolicyBasicAlgorithmYarnConfigArgs{
					GracefulDecommissionTimeout: pulumi.String("30s"),
					ScaleUpFactor:               pulumi.Float64(0.5),
					ScaleDownFactor:             pulumi.Float64(0.5),
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewCluster(ctx, "basic", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
					PolicyUri: asp.Name,
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

AutoscalingPolicy can be imported using any of these accepted formats:

```sh

$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}

```

```sh

$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default {{project}}/{{location}}/{{policy_id}}

```

```sh

$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default {{location}}/{{policy_id}}

```

func GetAutoscalingPolicy

func GetAutoscalingPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *AutoscalingPolicyState, opts ...pulumi.ResourceOption) (*AutoscalingPolicy, error)

GetAutoscalingPolicy gets an existing AutoscalingPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
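
As a sketch of how this might be used, the following program reads an existing policy by its full resource ID; the project, location, and policy ID below are hypothetical placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an existing policy by its full resource ID (placeholder values);
		// passing nil for state means no extra properties qualify the lookup.
		existing, err := dataproc.GetAutoscalingPolicy(ctx, "existing-asp",
			pulumi.ID("projects/my-project/locations/us-central1/autoscalingPolicies/dataproc-policy"),
			nil)
		if err != nil {
			return err
		}
		ctx.Export("policyName", existing.Name)
		return nil
	})
}
```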

func NewAutoscalingPolicy

func NewAutoscalingPolicy(ctx *pulumi.Context,
	name string, args *AutoscalingPolicyArgs, opts ...pulumi.ResourceOption) (*AutoscalingPolicy, error)

NewAutoscalingPolicy registers a new resource with the given unique name, arguments, and options.

func (*AutoscalingPolicy) ElementType

func (*AutoscalingPolicy) ElementType() reflect.Type

func (*AutoscalingPolicy) ToAutoscalingPolicyOutput

func (i *AutoscalingPolicy) ToAutoscalingPolicyOutput() AutoscalingPolicyOutput

func (*AutoscalingPolicy) ToAutoscalingPolicyOutputWithContext

func (i *AutoscalingPolicy) ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput

func (*AutoscalingPolicy) ToAutoscalingPolicyPtrOutput

func (i *AutoscalingPolicy) ToAutoscalingPolicyPtrOutput() AutoscalingPolicyPtrOutput

func (*AutoscalingPolicy) ToAutoscalingPolicyPtrOutputWithContext

func (i *AutoscalingPolicy) ToAutoscalingPolicyPtrOutputWithContext(ctx context.Context) AutoscalingPolicyPtrOutput

type AutoscalingPolicyArgs

type AutoscalingPolicyArgs struct {
	// Basic algorithm for autoscaling.
	// Structure is documented below.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrInput
	// The location where the autoscaling policy should reside.
	// The default value is `global`.
	Location pulumi.StringPtrInput
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 50 characters.
	PolicyId pulumi.StringInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// Describes how the autoscaler will operate for secondary workers.
	// Structure is documented below.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrInput
	// Describes how the autoscaler will operate for primary workers.
	// Structure is documented below.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrInput
}

The set of arguments for constructing an AutoscalingPolicy resource.

func (AutoscalingPolicyArgs) ElementType

func (AutoscalingPolicyArgs) ElementType() reflect.Type

type AutoscalingPolicyArray

type AutoscalingPolicyArray []AutoscalingPolicyInput

func (AutoscalingPolicyArray) ElementType

func (AutoscalingPolicyArray) ElementType() reflect.Type

func (AutoscalingPolicyArray) ToAutoscalingPolicyArrayOutput

func (i AutoscalingPolicyArray) ToAutoscalingPolicyArrayOutput() AutoscalingPolicyArrayOutput

func (AutoscalingPolicyArray) ToAutoscalingPolicyArrayOutputWithContext

func (i AutoscalingPolicyArray) ToAutoscalingPolicyArrayOutputWithContext(ctx context.Context) AutoscalingPolicyArrayOutput

type AutoscalingPolicyArrayInput

type AutoscalingPolicyArrayInput interface {
	pulumi.Input

	ToAutoscalingPolicyArrayOutput() AutoscalingPolicyArrayOutput
	ToAutoscalingPolicyArrayOutputWithContext(context.Context) AutoscalingPolicyArrayOutput
}

AutoscalingPolicyArrayInput is an input type that accepts AutoscalingPolicyArray and AutoscalingPolicyArrayOutput values. You can construct a concrete instance of `AutoscalingPolicyArrayInput` via:

AutoscalingPolicyArray{ AutoscalingPolicyArgs{...} }

type AutoscalingPolicyArrayOutput

type AutoscalingPolicyArrayOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyArrayOutput) ElementType

func (AutoscalingPolicyArrayOutput) Index

func (AutoscalingPolicyArrayOutput) ToAutoscalingPolicyArrayOutput

func (o AutoscalingPolicyArrayOutput) ToAutoscalingPolicyArrayOutput() AutoscalingPolicyArrayOutput

func (AutoscalingPolicyArrayOutput) ToAutoscalingPolicyArrayOutputWithContext

func (o AutoscalingPolicyArrayOutput) ToAutoscalingPolicyArrayOutputWithContext(ctx context.Context) AutoscalingPolicyArrayOutput

type AutoscalingPolicyBasicAlgorithm

type AutoscalingPolicyBasicAlgorithm struct {
	// Duration between scaling events. A scaling period starts after the
	// update operation from the previous event has completed.
	// Bounds: [2m, 1d]. Default: 2m.
	CooldownPeriod *string `pulumi:"cooldownPeriod"`
	// YARN autoscaling configuration.
	// Structure is documented below.
	YarnConfig AutoscalingPolicyBasicAlgorithmYarnConfig `pulumi:"yarnConfig"`
}

type AutoscalingPolicyBasicAlgorithmArgs

type AutoscalingPolicyBasicAlgorithmArgs struct {
	// Duration between scaling events. A scaling period starts after the
	// update operation from the previous event has completed.
	// Bounds: [2m, 1d]. Default: 2m.
	CooldownPeriod pulumi.StringPtrInput `pulumi:"cooldownPeriod"`
	// YARN autoscaling configuration.
	// Structure is documented below.
	YarnConfig AutoscalingPolicyBasicAlgorithmYarnConfigInput `pulumi:"yarnConfig"`
}
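
As a sketch of how these arguments compose, the following program sets an explicit cooldown period alongside a fully specified YARN configuration. The resource and policy names are made up and all numeric values are illustrative only; each field must stay within its documented bounds:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewAutoscalingPolicy(ctx, "tuned", &dataproc.AutoscalingPolicyArgs{
			PolicyId: pulumi.String("tuned-policy"),
			Location: pulumi.String("us-central1"),
			WorkerConfig: &dataproc.AutoscalingPolicyWorkerConfigArgs{
				MaxInstances: pulumi.Int(10),
			},
			BasicAlgorithm: &dataproc.AutoscalingPolicyBasicAlgorithmArgs{
				// Wait 4 minutes between scaling events (bounds: [2m, 1d]).
				CooldownPeriod: pulumi.String("240s"),
				YarnConfig: &dataproc.AutoscalingPolicyBasicAlgorithmYarnConfigArgs{
					GracefulDecommissionTimeout: pulumi.String("300s"),
					ScaleUpFactor:               pulumi.Float64(0.5),
					ScaleDownFactor:             pulumi.Float64(0.25),
					ScaleUpMinWorkerFraction:    pulumi.Float64(0.1),
					ScaleDownMinWorkerFraction:  pulumi.Float64(0.1),
				},
			},
		})
		return err
	})
}
```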

func (AutoscalingPolicyBasicAlgorithmArgs) ElementType

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutput

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmArgs) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

type AutoscalingPolicyBasicAlgorithmInput

type AutoscalingPolicyBasicAlgorithmInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput
	ToAutoscalingPolicyBasicAlgorithmOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmOutput
}

AutoscalingPolicyBasicAlgorithmInput is an input type that accepts AutoscalingPolicyBasicAlgorithmArgs and AutoscalingPolicyBasicAlgorithmOutput values. You can construct a concrete instance of `AutoscalingPolicyBasicAlgorithmInput` via:

AutoscalingPolicyBasicAlgorithmArgs{...}

type AutoscalingPolicyBasicAlgorithmOutput

type AutoscalingPolicyBasicAlgorithmOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmOutput) CooldownPeriod

Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.

func (AutoscalingPolicyBasicAlgorithmOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutput

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutput() AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmOutput) YarnConfig

YARN autoscaling configuration. Structure is documented below.

type AutoscalingPolicyBasicAlgorithmPtrInput

type AutoscalingPolicyBasicAlgorithmPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput
	ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput
}

AutoscalingPolicyBasicAlgorithmPtrInput is an input type that accepts AutoscalingPolicyBasicAlgorithmArgs, AutoscalingPolicyBasicAlgorithmPtr and AutoscalingPolicyBasicAlgorithmPtrOutput values. You can construct a concrete instance of `AutoscalingPolicyBasicAlgorithmPtrInput` via:

        AutoscalingPolicyBasicAlgorithmArgs{...}

or:

        nil

type AutoscalingPolicyBasicAlgorithmPtrOutput

type AutoscalingPolicyBasicAlgorithmPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmPtrOutput) CooldownPeriod

Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.

func (AutoscalingPolicyBasicAlgorithmPtrOutput) Elem

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput

func (o AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutput() AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmPtrOutput) ToAutoscalingPolicyBasicAlgorithmPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmPtrOutput

func (AutoscalingPolicyBasicAlgorithmPtrOutput) YarnConfig

YARN autoscaling configuration. Structure is documented below.

type AutoscalingPolicyBasicAlgorithmYarnConfig

type AutoscalingPolicyBasicAlgorithmYarnConfig struct {
	// Timeout for YARN graceful decommissioning of Node Managers. Specifies the
	// duration to wait for jobs to complete before forcefully removing workers
	// (and potentially interrupting jobs). Only applicable to downscaling operations.
	// Bounds: [0s, 1d].
	GracefulDecommissionTimeout string `pulumi:"gracefulDecommissionTimeout"`
	// Fraction of average pending memory in the last cooldown period for which to
	// remove workers. A scale-down factor of 1 will result in scaling down so that there
	// is no available memory remaining after the update (more aggressive scaling).
	// A scale-down factor of 0 disables removing workers, which can be beneficial for
	// autoscaling a single job.
	// Bounds: [0.0, 1.0].
	ScaleDownFactor float64 `pulumi:"scaleDownFactor"`
	// Minimum scale-down threshold as a fraction of total cluster size before scaling occurs.
	// For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must
	// recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0
	// means the autoscaler will scale down on any recommended change.
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleDownMinWorkerFraction *float64 `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of average pending memory in the last cooldown period for which to
	// add workers. A scale-up factor of 1.0 will result in scaling up so that there
	// is no pending memory remaining after the update (more aggressive scaling).
	// A scale-up factor closer to 0 will result in a smaller magnitude of scaling up
	// (less aggressive scaling).
	// Bounds: [0.0, 1.0].
	ScaleUpFactor float64 `pulumi:"scaleUpFactor"`
	// Minimum scale-up threshold as a fraction of total cluster size before scaling
	// occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler
	// must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of
	// 0 means the autoscaler will scale up on any recommended change.
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleUpMinWorkerFraction *float64 `pulumi:"scaleUpMinWorkerFraction"`
}

type AutoscalingPolicyBasicAlgorithmYarnConfigArgs

type AutoscalingPolicyBasicAlgorithmYarnConfigArgs struct {
	// Timeout for YARN graceful decommissioning of Node Managers. Specifies the
	// duration to wait for jobs to complete before forcefully removing workers
	// (and potentially interrupting jobs). Only applicable to downscaling operations.
	// Bounds: [0s, 1d].
	GracefulDecommissionTimeout pulumi.StringInput `pulumi:"gracefulDecommissionTimeout"`
	// Fraction of average pending memory in the last cooldown period for which to
	// remove workers. A scale-down factor of 1 will result in scaling down so that there
	// is no available memory remaining after the update (more aggressive scaling).
	// A scale-down factor of 0 disables removing workers, which can be beneficial for
	// autoscaling a single job.
	// Bounds: [0.0, 1.0].
	ScaleDownFactor pulumi.Float64Input `pulumi:"scaleDownFactor"`
	// Minimum scale-down threshold as a fraction of total cluster size before scaling occurs.
	// For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must
	// recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0
	// means the autoscaler will scale down on any recommended change.
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleDownMinWorkerFraction pulumi.Float64PtrInput `pulumi:"scaleDownMinWorkerFraction"`
	// Fraction of average pending memory in the last cooldown period for which to
	// add workers. A scale-up factor of 1.0 will result in scaling up so that there
	// is no pending memory remaining after the update (more aggressive scaling).
	// A scale-up factor closer to 0 will result in a smaller magnitude of scaling up
	// (less aggressive scaling).
	// Bounds: [0.0, 1.0].
	ScaleUpFactor pulumi.Float64Input `pulumi:"scaleUpFactor"`
	// Minimum scale-up threshold as a fraction of total cluster size before scaling
	// occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler
	// must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of
	// 0 means the autoscaler will scale up on any recommended change.
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleUpMinWorkerFraction pulumi.Float64PtrInput `pulumi:"scaleUpMinWorkerFraction"`
}

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ElementType

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput() AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext

func (i AutoscalingPolicyBasicAlgorithmYarnConfigArgs) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigInput

type AutoscalingPolicyBasicAlgorithmYarnConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput
	ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput
}

AutoscalingPolicyBasicAlgorithmYarnConfigInput is an input type that accepts AutoscalingPolicyBasicAlgorithmYarnConfigArgs and AutoscalingPolicyBasicAlgorithmYarnConfigOutput values. You can construct a concrete instance of `AutoscalingPolicyBasicAlgorithmYarnConfigInput` via:

AutoscalingPolicyBasicAlgorithmYarnConfigArgs{...}

type AutoscalingPolicyBasicAlgorithmYarnConfigOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) GracefulDecommissionTimeout

Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleDownFactor

Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleDownMinWorkerFraction

Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleUpFactor

Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ScaleUpMinWorkerFraction

Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutput() AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput() AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmYarnConfigOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigPtrInput

type AutoscalingPolicyBasicAlgorithmYarnConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput() AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput
	ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext(context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput
}

AutoscalingPolicyBasicAlgorithmYarnConfigPtrInput is an input type that accepts AutoscalingPolicyBasicAlgorithmYarnConfigArgs, AutoscalingPolicyBasicAlgorithmYarnConfigPtr and AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput values. You can construct a concrete instance of `AutoscalingPolicyBasicAlgorithmYarnConfigPtrInput` via:

        AutoscalingPolicyBasicAlgorithmYarnConfigArgs{...}

or:

        nil

type AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

type AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) Elem

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ElementType

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) GracefulDecommissionTimeout

Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ScaleDownFactor

Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ScaleDownMinWorkerFraction

Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ScaleUpFactor

Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ScaleUpMinWorkerFraction

Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (o AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput() AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

func (AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext

func (o AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput) ToAutoscalingPolicyBasicAlgorithmYarnConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyBasicAlgorithmYarnConfigPtrOutput

type AutoscalingPolicyInput

type AutoscalingPolicyInput interface {
	pulumi.Input

	ToAutoscalingPolicyOutput() AutoscalingPolicyOutput
	ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput
}

type AutoscalingPolicyMap

type AutoscalingPolicyMap map[string]AutoscalingPolicyInput

func (AutoscalingPolicyMap) ElementType

func (AutoscalingPolicyMap) ElementType() reflect.Type

func (AutoscalingPolicyMap) ToAutoscalingPolicyMapOutput

func (i AutoscalingPolicyMap) ToAutoscalingPolicyMapOutput() AutoscalingPolicyMapOutput

func (AutoscalingPolicyMap) ToAutoscalingPolicyMapOutputWithContext

func (i AutoscalingPolicyMap) ToAutoscalingPolicyMapOutputWithContext(ctx context.Context) AutoscalingPolicyMapOutput

type AutoscalingPolicyMapInput

type AutoscalingPolicyMapInput interface {
	pulumi.Input

	ToAutoscalingPolicyMapOutput() AutoscalingPolicyMapOutput
	ToAutoscalingPolicyMapOutputWithContext(context.Context) AutoscalingPolicyMapOutput
}

AutoscalingPolicyMapInput is an input type that accepts AutoscalingPolicyMap and AutoscalingPolicyMapOutput values. You can construct a concrete instance of `AutoscalingPolicyMapInput` via:

AutoscalingPolicyMap{ "key": AutoscalingPolicyArgs{...} }

type AutoscalingPolicyMapOutput

type AutoscalingPolicyMapOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyMapOutput) ElementType

func (AutoscalingPolicyMapOutput) ElementType() reflect.Type

func (AutoscalingPolicyMapOutput) MapIndex

func (AutoscalingPolicyMapOutput) ToAutoscalingPolicyMapOutput

func (o AutoscalingPolicyMapOutput) ToAutoscalingPolicyMapOutput() AutoscalingPolicyMapOutput

func (AutoscalingPolicyMapOutput) ToAutoscalingPolicyMapOutputWithContext

func (o AutoscalingPolicyMapOutput) ToAutoscalingPolicyMapOutputWithContext(ctx context.Context) AutoscalingPolicyMapOutput

type AutoscalingPolicyOutput

type AutoscalingPolicyOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyOutput) ElementType

func (AutoscalingPolicyOutput) ElementType() reflect.Type

func (AutoscalingPolicyOutput) ToAutoscalingPolicyOutput

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyOutput() AutoscalingPolicyOutput

func (AutoscalingPolicyOutput) ToAutoscalingPolicyOutputWithContext

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyOutputWithContext(ctx context.Context) AutoscalingPolicyOutput

func (AutoscalingPolicyOutput) ToAutoscalingPolicyPtrOutput

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyPtrOutput() AutoscalingPolicyPtrOutput

func (AutoscalingPolicyOutput) ToAutoscalingPolicyPtrOutputWithContext

func (o AutoscalingPolicyOutput) ToAutoscalingPolicyPtrOutputWithContext(ctx context.Context) AutoscalingPolicyPtrOutput

type AutoscalingPolicyPtrInput

type AutoscalingPolicyPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyPtrOutput() AutoscalingPolicyPtrOutput
	ToAutoscalingPolicyPtrOutputWithContext(ctx context.Context) AutoscalingPolicyPtrOutput
}

type AutoscalingPolicyPtrOutput

type AutoscalingPolicyPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyPtrOutput) Elem added in v5.21.0

func (AutoscalingPolicyPtrOutput) ElementType

func (AutoscalingPolicyPtrOutput) ElementType() reflect.Type

func (AutoscalingPolicyPtrOutput) ToAutoscalingPolicyPtrOutput

func (o AutoscalingPolicyPtrOutput) ToAutoscalingPolicyPtrOutput() AutoscalingPolicyPtrOutput

func (AutoscalingPolicyPtrOutput) ToAutoscalingPolicyPtrOutputWithContext

func (o AutoscalingPolicyPtrOutput) ToAutoscalingPolicyPtrOutputWithContext(ctx context.Context) AutoscalingPolicyPtrOutput

type AutoscalingPolicySecondaryWorkerConfig

type AutoscalingPolicySecondaryWorkerConfig struct {
	// Maximum number of instances for this group. Note that by default, clusters will not use
	// secondary workers. Required for secondary workers if the minimum secondary instances is set.
	// Bounds: [minInstances, ). Defaults to 0.
	MaxInstances *int `pulumi:"maxInstances"`
	// Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
	MinInstances *int `pulumi:"minInstances"`
	// Weight for the instance group, which is used to determine the fraction of total workers
	// in the cluster from this instance group. For example, if primary workers have weight 2,
	// and secondary workers have weight 1, the cluster will have approximately 2 primary workers
	// for each secondary worker.
	// The cluster may not reach the specified balance if constrained by min/max bounds or other
	// autoscaling settings. For example, if maxInstances for secondary workers is 0, then only
	// primary workers will be added. The cluster can also be out of balance when created.
	// If weight is not set on any instance group, the cluster will default to equal weight for
	// all groups: the cluster will attempt to maintain an equal number of workers in each group
	// within the configured size bounds for each group. If weight is set for one group only,
	// the cluster will default to zero weight on the unset group. For example if weight is set
	// only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight *int `pulumi:"weight"`
}

type AutoscalingPolicySecondaryWorkerConfigArgs

type AutoscalingPolicySecondaryWorkerConfigArgs struct {
	// Maximum number of instances for this group. Note that by default, clusters will not use
	// secondary workers. Required for secondary workers if the minimum secondary instances is set.
	// Bounds: [minInstances, ). Defaults to 0.
	MaxInstances pulumi.IntPtrInput `pulumi:"maxInstances"`
	// Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
	MinInstances pulumi.IntPtrInput `pulumi:"minInstances"`
	// Weight for the instance group, which is used to determine the fraction of total workers
	// in the cluster from this instance group. For example, if primary workers have weight 2,
	// and secondary workers have weight 1, the cluster will have approximately 2 primary workers
	// for each secondary worker.
	// The cluster may not reach the specified balance if constrained by min/max bounds or other
	// autoscaling settings. For example, if maxInstances for secondary workers is 0, then only
	// primary workers will be added. The cluster can also be out of balance when created.
	// If weight is not set on any instance group, the cluster will default to equal weight for
	// all groups: the cluster will attempt to maintain an equal number of workers in each group
	// within the configured size bounds for each group. If weight is set for one group only,
	// the cluster will default to zero weight on the unset group. For example if weight is set
	// only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight pulumi.IntPtrInput `pulumi:"weight"`
}
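
The weight fields are easiest to see with both worker groups configured. A sketch under the assumption that cheaper secondary (preemptible) workers should absorb most of the scaling; the names and numbers are illustrative, not a recommendation:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// With weights 1 (primary) and 3 (secondary), the autoscaler targets
		// roughly three secondary workers per primary worker, within each
		// group's min/max bounds.
		_, err := dataproc.NewAutoscalingPolicy(ctx, "weighted", &dataproc.AutoscalingPolicyArgs{
			PolicyId: pulumi.String("weighted-policy"),
			Location: pulumi.String("us-central1"),
			WorkerConfig: &dataproc.AutoscalingPolicyWorkerConfigArgs{
				MinInstances: pulumi.Int(2),
				MaxInstances: pulumi.Int(10),
				Weight:       pulumi.Int(1),
			},
			SecondaryWorkerConfig: &dataproc.AutoscalingPolicySecondaryWorkerConfigArgs{
				MaxInstances: pulumi.Int(30),
				Weight:       pulumi.Int(3),
			},
			BasicAlgorithm: &dataproc.AutoscalingPolicyBasicAlgorithmArgs{
				YarnConfig: &dataproc.AutoscalingPolicyBasicAlgorithmYarnConfigArgs{
					GracefulDecommissionTimeout: pulumi.String("30s"),
					ScaleUpFactor:               pulumi.Float64(0.5),
					ScaleDownFactor:             pulumi.Float64(0.5),
				},
			},
		})
		return err
	})
}
```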

func (AutoscalingPolicySecondaryWorkerConfigArgs) ElementType

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutput

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (i AutoscalingPolicySecondaryWorkerConfigArgs) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

type AutoscalingPolicySecondaryWorkerConfigInput

type AutoscalingPolicySecondaryWorkerConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput
	ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(context.Context) AutoscalingPolicySecondaryWorkerConfigOutput
}

AutoscalingPolicySecondaryWorkerConfigInput is an input type that accepts AutoscalingPolicySecondaryWorkerConfigArgs and AutoscalingPolicySecondaryWorkerConfigOutput values. You can construct a concrete instance of `AutoscalingPolicySecondaryWorkerConfigInput` via:

AutoscalingPolicySecondaryWorkerConfigArgs{...}

type AutoscalingPolicySecondaryWorkerConfigOutput

type AutoscalingPolicySecondaryWorkerConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicySecondaryWorkerConfigOutput) ElementType

func (AutoscalingPolicySecondaryWorkerConfigOutput) MaxInstances

Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.

func (AutoscalingPolicySecondaryWorkerConfigOutput) MinInstances

Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutput

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutput() AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigOutput) Weight

Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type AutoscalingPolicySecondaryWorkerConfigPtrInput

type AutoscalingPolicySecondaryWorkerConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput
	ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput
}

AutoscalingPolicySecondaryWorkerConfigPtrInput is an input type that accepts AutoscalingPolicySecondaryWorkerConfigArgs, AutoscalingPolicySecondaryWorkerConfigPtr and AutoscalingPolicySecondaryWorkerConfigPtrOutput values. You can construct a concrete instance of `AutoscalingPolicySecondaryWorkerConfigPtrInput` via:

        AutoscalingPolicySecondaryWorkerConfigArgs{...}

or:

        nil

type AutoscalingPolicySecondaryWorkerConfigPtrOutput

type AutoscalingPolicySecondaryWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) Elem

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ElementType

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) MaxInstances

Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) MinInstances

Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput

func (o AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutput() AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicySecondaryWorkerConfigPtrOutput) ToAutoscalingPolicySecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicySecondaryWorkerConfigPtrOutput

func (AutoscalingPolicySecondaryWorkerConfigPtrOutput) Weight

Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type AutoscalingPolicyState

type AutoscalingPolicyState struct {
	// Basic algorithm for autoscaling.
	// Structure is documented below.
	BasicAlgorithm AutoscalingPolicyBasicAlgorithmPtrInput
	// The location where the autoscaling policy should reside.
	// The default value is `global`.
	Location pulumi.StringPtrInput
	// The "resource name" of the autoscaling policy.
	Name pulumi.StringPtrInput
	// The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 50 characters.
	PolicyId pulumi.StringPtrInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// Describes how the autoscaler will operate for secondary workers.
	// Structure is documented below.
	SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigPtrInput
	// Describes how the autoscaler will operate for primary workers.
	// Structure is documented below.
	WorkerConfig AutoscalingPolicyWorkerConfigPtrInput
}

func (AutoscalingPolicyState) ElementType

func (AutoscalingPolicyState) ElementType() reflect.Type

type AutoscalingPolicyWorkerConfig

type AutoscalingPolicyWorkerConfig struct {
	// Maximum number of instances for this group. Note that by default, clusters will not use
	// secondary workers. Required for secondary workers if the minimum secondary instances is set.
	// Bounds: [minInstances, ). Defaults to 0.
	MaxInstances int `pulumi:"maxInstances"`
	// Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
	MinInstances *int `pulumi:"minInstances"`
	// Weight for the instance group, which is used to determine the fraction of total workers
	// in the cluster from this instance group. For example, if primary workers have weight 2,
	// and secondary workers have weight 1, the cluster will have approximately 2 primary workers
	// for each secondary worker.
	// The cluster may not reach the specified balance if constrained by min/max bounds or other
	// autoscaling settings. For example, if maxInstances for secondary workers is 0, then only
	// primary workers will be added. The cluster can also be out of balance when created.
	// If weight is not set on any instance group, the cluster will default to equal weight for
	// all groups: the cluster will attempt to maintain an equal number of workers in each group
	// within the configured size bounds for each group. If weight is set for one group only,
	// the cluster will default to zero weight on the unset group. For example if weight is set
	// only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight *int `pulumi:"weight"`
}

type AutoscalingPolicyWorkerConfigArgs

type AutoscalingPolicyWorkerConfigArgs struct {
	// Maximum number of instances for this group. Note that by default, clusters will not use
	// secondary workers. Required for secondary workers if the minimum secondary instances is set.
	// Bounds: [minInstances, ). Defaults to 0.
	MaxInstances pulumi.IntInput `pulumi:"maxInstances"`
	// Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
	MinInstances pulumi.IntPtrInput `pulumi:"minInstances"`
	// Weight for the instance group, which is used to determine the fraction of total workers
	// in the cluster from this instance group. For example, if primary workers have weight 2,
	// and secondary workers have weight 1, the cluster will have approximately 2 primary workers
	// for each secondary worker.
	// The cluster may not reach the specified balance if constrained by min/max bounds or other
	// autoscaling settings. For example, if maxInstances for secondary workers is 0, then only
	// primary workers will be added. The cluster can also be out of balance when created.
	// If weight is not set on any instance group, the cluster will default to equal weight for
	// all groups: the cluster will attempt to maintain an equal number of workers in each group
	// within the configured size bounds for each group. If weight is set for one group only,
	// the cluster will default to zero weight on the unset group. For example if weight is set
	// only on primary workers, the cluster will use primary workers only and no secondary workers.
	Weight pulumi.IntPtrInput `pulumi:"weight"`
}

func (AutoscalingPolicyWorkerConfigArgs) ElementType

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutput

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutputWithContext

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutput

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (i AutoscalingPolicyWorkerConfigArgs) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

type AutoscalingPolicyWorkerConfigInput

type AutoscalingPolicyWorkerConfigInput interface {
	pulumi.Input

	ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput
	ToAutoscalingPolicyWorkerConfigOutputWithContext(context.Context) AutoscalingPolicyWorkerConfigOutput
}

AutoscalingPolicyWorkerConfigInput is an input type that accepts AutoscalingPolicyWorkerConfigArgs and AutoscalingPolicyWorkerConfigOutput values. You can construct a concrete instance of `AutoscalingPolicyWorkerConfigInput` via:

AutoscalingPolicyWorkerConfigArgs{...}

type AutoscalingPolicyWorkerConfigOutput

type AutoscalingPolicyWorkerConfigOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyWorkerConfigOutput) ElementType

func (AutoscalingPolicyWorkerConfigOutput) MaxInstances

Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.

func (AutoscalingPolicyWorkerConfigOutput) MinInstances

Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutput

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutput() AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutputWithContext

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutput

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicyWorkerConfigOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigOutput) Weight

Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type AutoscalingPolicyWorkerConfigPtrInput

type AutoscalingPolicyWorkerConfigPtrInput interface {
	pulumi.Input

	ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput
	ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(context.Context) AutoscalingPolicyWorkerConfigPtrOutput
}

AutoscalingPolicyWorkerConfigPtrInput is an input type that accepts AutoscalingPolicyWorkerConfigArgs, AutoscalingPolicyWorkerConfigPtr and AutoscalingPolicyWorkerConfigPtrOutput values. You can construct a concrete instance of `AutoscalingPolicyWorkerConfigPtrInput` via:

        AutoscalingPolicyWorkerConfigArgs{...}

or:

        nil

type AutoscalingPolicyWorkerConfigPtrOutput

type AutoscalingPolicyWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (AutoscalingPolicyWorkerConfigPtrOutput) Elem

func (AutoscalingPolicyWorkerConfigPtrOutput) ElementType

func (AutoscalingPolicyWorkerConfigPtrOutput) MaxInstances

Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.

func (AutoscalingPolicyWorkerConfigPtrOutput) MinInstances

Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.

func (AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutput

func (o AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutput() AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext

func (o AutoscalingPolicyWorkerConfigPtrOutput) ToAutoscalingPolicyWorkerConfigPtrOutputWithContext(ctx context.Context) AutoscalingPolicyWorkerConfigPtrOutput

func (AutoscalingPolicyWorkerConfigPtrOutput) Weight

Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.

type Cluster

type Cluster struct {
	pulumi.CustomResourceState

	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigOutput `pulumi:"clusterConfig"`
	// The timeout duration which allows graceful decommissioning when you change
	// the number of worker nodes directly through an update (e.g. `pulumi up`).
	GracefulDecommissionTimeout pulumi.StringPtrOutput `pulumi:"gracefulDecommissionTimeout"`
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringOutput `pulumi:"name"`
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrOutput `pulumi:"region"`
}

Manages a Cloud Dataproc cluster resource within GCP.

* [API documentation](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters)
* How-to Guides

!> **Warning:** Due to limitations of the API, all arguments except `labels`, `cluster_config.worker_config.num_instances` and `cluster_config.preemptible_worker_config.num_instances` are non-updatable. Changing any other argument will cause the whole cluster to be recreated!

## Example Usage

### Basic

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "simplecluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

### Advanced

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/serviceAccount"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_default, err := serviceAccount.NewAccount(ctx, "_default", &serviceAccount.AccountArgs{
			AccountId:   pulumi.String("service-account-id"),
			DisplayName: pulumi.String("Service Account"),
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Region:                      pulumi.String("us-central1"),
			GracefulDecommissionTimeout: pulumi.String("120s"),
			Labels: pulumi.StringMap{
				"foo": pulumi.String("bar"),
			},
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				StagingBucket: pulumi.String("dataproc-staging-bucket"),
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					NumInstances: pulumi.Int(1),
					MachineType:  pulumi.String("e2-medium"),
					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
						BootDiskType:   pulumi.String("pd-ssd"),
						BootDiskSizeGb: pulumi.Int(30),
					},
				},
				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
					NumInstances:   pulumi.Int(2),
					MachineType:    pulumi.String("e2-medium"),
					MinCpuPlatform: pulumi.String("Intel Skylake"),
					DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
						BootDiskSizeGb: pulumi.Int(30),
						NumLocalSsds:   pulumi.Int(1),
					},
				},
				PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
					NumInstances: pulumi.Int(0),
				},
				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
					ImageVersion: pulumi.String("1.3.7-deb9"),
					OverrideProperties: pulumi.StringMap{
						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
					},
				},
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					Tags: pulumi.StringArray{
						pulumi.String("foo"),
						pulumi.String("bar"),
					},
					ServiceAccount: _default.Email,
					ServiceAccountScopes: pulumi.StringArray{
						pulumi.String("cloud-platform"),
					},
				},
				InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
					&dataproc.ClusterClusterConfigInitializationActionArgs{
						Script:     pulumi.String("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh"),
						TimeoutSec: pulumi.Int(500),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

``` ### Using A GPU Accelerator

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "acceleratedCluster", &dataproc.ClusterArgs{
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					Zone: pulumi.String("us-central1-a"),
				},
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
						&dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
							AcceleratorCount: pulumi.Int(1),
							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
						},
					},
				},
			},
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

This resource does not support import.

func GetCluster

func GetCluster(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error)

GetCluster gets an existing Cluster resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
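
A minimal sketch (not part of the generated docs) of looking up an already-provisioned cluster; the resource name and ID value below are hypothetical placeholders, and passing `nil` for state means no extra properties qualify the lookup.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// "existing-cluster" and the ID value are placeholders for a cluster
		// that is assumed to already exist.
		existing, err := dataproc.GetCluster(ctx, "existing-cluster", pulumi.ID("mycluster"), nil)
		if err != nil {
			return err
		}
		ctx.Export("clusterName", existing.Name)
		return nil
	})
}
```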

func NewCluster

func NewCluster(ctx *pulumi.Context,
	name string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error)

NewCluster registers a new resource with the given unique name, arguments, and options.

func (*Cluster) ElementType

func (*Cluster) ElementType() reflect.Type

func (*Cluster) ToClusterOutput

func (i *Cluster) ToClusterOutput() ClusterOutput

func (*Cluster) ToClusterOutputWithContext

func (i *Cluster) ToClusterOutputWithContext(ctx context.Context) ClusterOutput

func (*Cluster) ToClusterPtrOutput

func (i *Cluster) ToClusterPtrOutput() ClusterPtrOutput

func (*Cluster) ToClusterPtrOutputWithContext

func (i *Cluster) ToClusterPtrOutputWithContext(ctx context.Context) ClusterPtrOutput

type ClusterArgs

type ClusterArgs struct {
	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigPtrInput
	// The timeout duration which allows graceful decommissioning when you change the
	// number of worker nodes directly through a `pulumi up`.
	GracefulDecommissionTimeout pulumi.StringPtrInput
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapInput
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringPtrInput
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a Cluster resource.

func (ClusterArgs) ElementType

func (ClusterArgs) ElementType() reflect.Type

type ClusterArray

type ClusterArray []ClusterInput

func (ClusterArray) ElementType

func (ClusterArray) ElementType() reflect.Type

func (ClusterArray) ToClusterArrayOutput

func (i ClusterArray) ToClusterArrayOutput() ClusterArrayOutput

func (ClusterArray) ToClusterArrayOutputWithContext

func (i ClusterArray) ToClusterArrayOutputWithContext(ctx context.Context) ClusterArrayOutput

type ClusterArrayInput

type ClusterArrayInput interface {
	pulumi.Input

	ToClusterArrayOutput() ClusterArrayOutput
	ToClusterArrayOutputWithContext(context.Context) ClusterArrayOutput
}

ClusterArrayInput is an input type that accepts ClusterArray and ClusterArrayOutput values. You can construct a concrete instance of `ClusterArrayInput` via:

ClusterArray{ ClusterArgs{...} }

type ClusterArrayOutput

type ClusterArrayOutput struct{ *pulumi.OutputState }

func (ClusterArrayOutput) ElementType

func (ClusterArrayOutput) ElementType() reflect.Type

func (ClusterArrayOutput) Index

func (ClusterArrayOutput) ToClusterArrayOutput

func (o ClusterArrayOutput) ToClusterArrayOutput() ClusterArrayOutput

func (ClusterArrayOutput) ToClusterArrayOutputWithContext

func (o ClusterArrayOutput) ToClusterArrayOutputWithContext(ctx context.Context) ClusterArrayOutput

type ClusterClusterConfig

type ClusterClusterConfig struct {
	// The autoscaling policy config associated with the cluster.
	// Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can
	// only be removed by setting `policyUri = ""`, rather than removing the whole block.
	// Structure defined below.
	AutoscalingConfig *ClusterClusterConfigAutoscalingConfig `pulumi:"autoscalingConfig"`
	Bucket            *string                                `pulumi:"bucket"`
	// The Customer managed encryption keys settings for the cluster.
	// Structure defined below.
	EncryptionConfig *ClusterClusterConfigEncryptionConfig `pulumi:"encryptionConfig"`
	// The config settings for port access on the cluster.
	// Structure defined below.
	EndpointConfig *ClusterClusterConfigEndpointConfig `pulumi:"endpointConfig"`
	// Common config settings for resources of Google Compute Engine cluster
	// instances, applicable to all instances in the cluster. Structure defined below.
	GceClusterConfig *ClusterClusterConfigGceClusterConfig `pulumi:"gceClusterConfig"`
	// Commands to execute on each node after config is completed.
	// You can specify multiple initialization actions. Structure defined below.
	InitializationActions []ClusterClusterConfigInitializationAction `pulumi:"initializationActions"`
	// The settings for auto deletion cluster schedule.
	// Structure defined below.
	LifecycleConfig *ClusterClusterConfigLifecycleConfig `pulumi:"lifecycleConfig"`
	// The Google Compute Engine config settings for the master instances
	// in a cluster. Structure defined below.
	MasterConfig *ClusterClusterConfigMasterConfig `pulumi:"masterConfig"`
	// The config settings for the metastore service associated with the cluster.
	// Structure defined below.
	// ***
	MetastoreConfig *ClusterClusterConfigMetastoreConfig `pulumi:"metastoreConfig"`
	// The Google Compute Engine config settings for the additional
	// instances in a cluster. Structure defined below.
	// * **NOTE** : `preemptibleWorkerConfig` is
	//   an alias for the API's [secondaryWorkerConfig](https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig#InstanceGroupConfig). The name doesn't necessarily mean these instances are preemptible; it is kept for legacy/compatibility reasons.
	PreemptibleWorkerConfig *ClusterClusterConfigPreemptibleWorkerConfig `pulumi:"preemptibleWorkerConfig"`
	// Security related configuration. Structure defined below.
	SecurityConfig *ClusterClusterConfigSecurityConfig `pulumi:"securityConfig"`
	// The config settings for software inside the cluster.
	// Structure defined below.
	SoftwareConfig *ClusterClusterConfigSoftwareConfig `pulumi:"softwareConfig"`
	// The Cloud Storage staging bucket used to stage files,
	// such as Hadoop jars, between client machines and the cluster.
	// Note: If you don't explicitly specify a `stagingBucket`
	// then GCP will auto create / assign one for you. However, you are not guaranteed
	// an auto generated bucket which is solely dedicated to your cluster; it may be shared
	// with other clusters in the same region/zone also choosing to use the auto generation
	// option.
	StagingBucket *string `pulumi:"stagingBucket"`
	// The Cloud Storage temp bucket used to store ephemeral cluster
	// and jobs data, such as Spark and MapReduce history files.
	// Note: If you don't explicitly specify a `tempBucket` then GCP will auto create / assign one for you.
	TempBucket *string `pulumi:"tempBucket"`
	// The Google Compute Engine config settings for the worker instances
	// in a cluster. Structure defined below.
	WorkerConfig *ClusterClusterConfigWorkerConfig `pulumi:"workerConfig"`
}

type ClusterClusterConfigArgs

type ClusterClusterConfigArgs struct {
	// The autoscaling policy config associated with the cluster.
	// Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can
	// only be removed by setting `policyUri = ""`, rather than removing the whole block.
	// Structure defined below.
	AutoscalingConfig ClusterClusterConfigAutoscalingConfigPtrInput `pulumi:"autoscalingConfig"`
	Bucket            pulumi.StringPtrInput                         `pulumi:"bucket"`
	// The Customer managed encryption keys settings for the cluster.
	// Structure defined below.
	EncryptionConfig ClusterClusterConfigEncryptionConfigPtrInput `pulumi:"encryptionConfig"`
	// The config settings for port access on the cluster.
	// Structure defined below.
	EndpointConfig ClusterClusterConfigEndpointConfigPtrInput `pulumi:"endpointConfig"`
	// Common config settings for resources of Google Compute Engine cluster
	// instances, applicable to all instances in the cluster. Structure defined below.
	GceClusterConfig ClusterClusterConfigGceClusterConfigPtrInput `pulumi:"gceClusterConfig"`
	// Commands to execute on each node after config is completed.
	// You can specify multiple initialization actions. Structure defined below.
	InitializationActions ClusterClusterConfigInitializationActionArrayInput `pulumi:"initializationActions"`
	// The settings for auto deletion cluster schedule.
	// Structure defined below.
	LifecycleConfig ClusterClusterConfigLifecycleConfigPtrInput `pulumi:"lifecycleConfig"`
	// The Google Compute Engine config settings for the master instances
	// in a cluster. Structure defined below.
	MasterConfig ClusterClusterConfigMasterConfigPtrInput `pulumi:"masterConfig"`
	// The config settings for the metastore service associated with the cluster.
	// Structure defined below.
	// ***
	MetastoreConfig ClusterClusterConfigMetastoreConfigPtrInput `pulumi:"metastoreConfig"`
	// The Google Compute Engine config settings for the additional
	// instances in a cluster. Structure defined below.
	// * **NOTE** : `preemptibleWorkerConfig` is
	//   an alias for the API's [secondaryWorkerConfig](https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig#InstanceGroupConfig). The name doesn't necessarily mean these instances are preemptible; it is kept for legacy/compatibility reasons.
	PreemptibleWorkerConfig ClusterClusterConfigPreemptibleWorkerConfigPtrInput `pulumi:"preemptibleWorkerConfig"`
	// Security related configuration. Structure defined below.
	SecurityConfig ClusterClusterConfigSecurityConfigPtrInput `pulumi:"securityConfig"`
	// The config settings for software inside the cluster.
	// Structure defined below.
	SoftwareConfig ClusterClusterConfigSoftwareConfigPtrInput `pulumi:"softwareConfig"`
	// The Cloud Storage staging bucket used to stage files,
	// such as Hadoop jars, between client machines and the cluster.
	// Note: If you don't explicitly specify a `stagingBucket`
	// then GCP will auto create / assign one for you. However, you are not guaranteed
	// an auto generated bucket which is solely dedicated to your cluster; it may be shared
	// with other clusters in the same region/zone also choosing to use the auto generation
	// option.
	StagingBucket pulumi.StringPtrInput `pulumi:"stagingBucket"`
	// The Cloud Storage temp bucket used to store ephemeral cluster
	// and jobs data, such as Spark and MapReduce history files.
	// Note: If you don't explicitly specify a `tempBucket` then GCP will auto create / assign one for you.
	TempBucket pulumi.StringPtrInput `pulumi:"tempBucket"`
	// The Google Compute Engine config settings for the worker instances
	// in a cluster. Structure defined below.
	WorkerConfig ClusterClusterConfigWorkerConfigPtrInput `pulumi:"workerConfig"`
}

func (ClusterClusterConfigArgs) ElementType

func (ClusterClusterConfigArgs) ElementType() reflect.Type

func (ClusterClusterConfigArgs) ToClusterClusterConfigOutput

func (i ClusterClusterConfigArgs) ToClusterClusterConfigOutput() ClusterClusterConfigOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigOutputWithContext

func (i ClusterClusterConfigArgs) ToClusterClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutput

func (i ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutputWithContext

func (i ClusterClusterConfigArgs) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

type ClusterClusterConfigAutoscalingConfig

type ClusterClusterConfigAutoscalingConfig struct {
	// The autoscaling policy used by the cluster.
	PolicyUri string `pulumi:"policyUri"`
}

type ClusterClusterConfigAutoscalingConfigArgs

type ClusterClusterConfigAutoscalingConfigArgs struct {
	// The autoscaling policy used by the cluster.
	PolicyUri pulumi.StringInput `pulumi:"policyUri"`
}
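
Per the note on `autoscalingConfig` above, detaching a policy when it is the only field set in `clusterConfig` is done by setting `policyUri` to an empty string rather than removing the block. A minimal sketch with placeholder names:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Setting PolicyUri to "" detaches any previously attached autoscaling
		// policy while keeping the autoscalingConfig block in place.
		_, err := dataproc.NewCluster(ctx, "no-autoscaling", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
					PolicyUri: pulumi.String(""),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```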

func (ClusterClusterConfigAutoscalingConfigArgs) ElementType

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutput

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutputWithContext

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (i ClusterClusterConfigAutoscalingConfigArgs) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigInput

type ClusterClusterConfigAutoscalingConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput
	ToClusterClusterConfigAutoscalingConfigOutputWithContext(context.Context) ClusterClusterConfigAutoscalingConfigOutput
}

ClusterClusterConfigAutoscalingConfigInput is an input type that accepts ClusterClusterConfigAutoscalingConfigArgs and ClusterClusterConfigAutoscalingConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigAutoscalingConfigInput` via:

ClusterClusterConfigAutoscalingConfigArgs{...}

type ClusterClusterConfigAutoscalingConfigOutput

type ClusterClusterConfigAutoscalingConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigAutoscalingConfigOutput) ElementType

func (ClusterClusterConfigAutoscalingConfigOutput) PolicyUri

The autoscaling policy used by the cluster.

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutput

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutput() ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigPtrInput

type ClusterClusterConfigAutoscalingConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput
	ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput
}

ClusterClusterConfigAutoscalingConfigPtrInput is an input type that accepts ClusterClusterConfigAutoscalingConfigArgs, ClusterClusterConfigAutoscalingConfigPtr and ClusterClusterConfigAutoscalingConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigAutoscalingConfigPtrInput` via:

        ClusterClusterConfigAutoscalingConfigArgs{...}

or:

        nil

type ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigAutoscalingConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigAutoscalingConfigPtrOutput) Elem

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ElementType

func (ClusterClusterConfigAutoscalingConfigPtrOutput) PolicyUri

The autoscaling policy used by the cluster.

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput

func (o ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutput() ClusterClusterConfigAutoscalingConfigPtrOutput

func (ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext

func (o ClusterClusterConfigAutoscalingConfigPtrOutput) ToClusterClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigAutoscalingConfigPtrOutput

type ClusterClusterConfigEncryptionConfig

type ClusterClusterConfigEncryptionConfig struct {
	// The Cloud KMS key name to use for PD disk encryption for
	// all instances in the cluster.
	KmsKeyName string `pulumi:"kmsKeyName"`
}

type ClusterClusterConfigEncryptionConfigArgs

type ClusterClusterConfigEncryptionConfigArgs struct {
	// The Cloud KMS key name to use for PD disk encryption for
	// all instances in the cluster.
	KmsKeyName pulumi.StringInput `pulumi:"kmsKeyName"`
}
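
A minimal sketch (placeholder project, key ring, and key names) showing `kmsKeyName` wired into a cluster's `encryptionConfig`:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// The KMS key name below is a placeholder; the Dataproc service account
		// typically needs permission to use the key for disk encryption.
		_, err := dataproc.NewCluster(ctx, "encrypted", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				EncryptionConfig: &dataproc.ClusterClusterConfigEncryptionConfigArgs{
					KmsKeyName: pulumi.String("projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```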

func (ClusterClusterConfigEncryptionConfigArgs) ElementType

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutput

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutputWithContext

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutput

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (i ClusterClusterConfigEncryptionConfigArgs) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigInput

type ClusterClusterConfigEncryptionConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput
	ToClusterClusterConfigEncryptionConfigOutputWithContext(context.Context) ClusterClusterConfigEncryptionConfigOutput
}

ClusterClusterConfigEncryptionConfigInput is an input type that accepts ClusterClusterConfigEncryptionConfigArgs and ClusterClusterConfigEncryptionConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigEncryptionConfigInput` via:

ClusterClusterConfigEncryptionConfigArgs{...}

type ClusterClusterConfigEncryptionConfigOutput

type ClusterClusterConfigEncryptionConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEncryptionConfigOutput) ElementType

func (ClusterClusterConfigEncryptionConfigOutput) KmsKeyName

The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutput

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutput() ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutputWithContext

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutput

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (o ClusterClusterConfigEncryptionConfigOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigPtrInput

type ClusterClusterConfigEncryptionConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput
	ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(context.Context) ClusterClusterConfigEncryptionConfigPtrOutput
}

ClusterClusterConfigEncryptionConfigPtrInput is an input type that accepts ClusterClusterConfigEncryptionConfigArgs, ClusterClusterConfigEncryptionConfigPtr and ClusterClusterConfigEncryptionConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigEncryptionConfigPtrInput` via:

        ClusterClusterConfigEncryptionConfigArgs{...}

or:

        nil

type ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEncryptionConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEncryptionConfigPtrOutput) Elem

func (ClusterClusterConfigEncryptionConfigPtrOutput) ElementType

func (ClusterClusterConfigEncryptionConfigPtrOutput) KmsKeyName

The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutput

func (o ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutput() ClusterClusterConfigEncryptionConfigPtrOutput

func (ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext

func (o ClusterClusterConfigEncryptionConfigPtrOutput) ToClusterClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEncryptionConfigPtrOutput

type ClusterClusterConfigEndpointConfig

type ClusterClusterConfigEndpointConfig struct {
	// The flag to enable http access to specific ports
	// on the cluster from external sources (aka Component Gateway). Defaults to false.
	EnableHttpPortAccess bool                   `pulumi:"enableHttpPortAccess"`
	HttpPorts            map[string]interface{} `pulumi:"httpPorts"`
}

type ClusterClusterConfigEndpointConfigArgs

type ClusterClusterConfigEndpointConfigArgs struct {
	// The flag to enable http access to specific ports
	// on the cluster from external sources (aka Component Gateway). Defaults to false.
	EnableHttpPortAccess pulumi.BoolInput `pulumi:"enableHttpPortAccess"`
	HttpPorts            pulumi.MapInput  `pulumi:"httpPorts"`
}
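
A minimal sketch (placeholder cluster name) turning on HTTP port access, i.e. the Component Gateway, for a cluster:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Enabling HTTP port access exposes component web UIs (e.g. YARN, Spark)
		// through the Component Gateway; httpPorts is populated by GCP.
		_, err := dataproc.NewCluster(ctx, "gateway-enabled", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				EndpointConfig: &dataproc.ClusterClusterConfigEndpointConfigArgs{
					EnableHttpPortAccess: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```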

func (ClusterClusterConfigEndpointConfigArgs) ElementType

func (ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigOutput

func (i ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigOutput() ClusterClusterConfigEndpointConfigOutput

func (ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigOutputWithContext

func (i ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigOutput

func (ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigPtrOutput

func (i ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigPtrOutput() ClusterClusterConfigEndpointConfigPtrOutput

func (ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigPtrOutputWithContext

func (i ClusterClusterConfigEndpointConfigArgs) ToClusterClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigPtrOutput

type ClusterClusterConfigEndpointConfigInput

type ClusterClusterConfigEndpointConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigEndpointConfigOutput() ClusterClusterConfigEndpointConfigOutput
	ToClusterClusterConfigEndpointConfigOutputWithContext(context.Context) ClusterClusterConfigEndpointConfigOutput
}

ClusterClusterConfigEndpointConfigInput is an input type that accepts ClusterClusterConfigEndpointConfigArgs and ClusterClusterConfigEndpointConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigEndpointConfigInput` via:

ClusterClusterConfigEndpointConfigArgs{...}

type ClusterClusterConfigEndpointConfigOutput

type ClusterClusterConfigEndpointConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEndpointConfigOutput) ElementType

func (ClusterClusterConfigEndpointConfigOutput) EnableHttpPortAccess

The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.

func (ClusterClusterConfigEndpointConfigOutput) HttpPorts

func (ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigOutput

func (o ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigOutput() ClusterClusterConfigEndpointConfigOutput

func (ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigOutputWithContext

func (o ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigOutput

func (ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigPtrOutput

func (o ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigPtrOutput() ClusterClusterConfigEndpointConfigPtrOutput

func (ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigPtrOutputWithContext

func (o ClusterClusterConfigEndpointConfigOutput) ToClusterClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigPtrOutput

type ClusterClusterConfigEndpointConfigPtrInput

type ClusterClusterConfigEndpointConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigEndpointConfigPtrOutput() ClusterClusterConfigEndpointConfigPtrOutput
	ToClusterClusterConfigEndpointConfigPtrOutputWithContext(context.Context) ClusterClusterConfigEndpointConfigPtrOutput
}

ClusterClusterConfigEndpointConfigPtrInput is an input type that accepts ClusterClusterConfigEndpointConfigArgs, ClusterClusterConfigEndpointConfigPtr and ClusterClusterConfigEndpointConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigEndpointConfigPtrInput` via:

        ClusterClusterConfigEndpointConfigArgs{...}

or:

        nil

type ClusterClusterConfigEndpointConfigPtrOutput

type ClusterClusterConfigEndpointConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigEndpointConfigPtrOutput) Elem

func (ClusterClusterConfigEndpointConfigPtrOutput) ElementType

func (ClusterClusterConfigEndpointConfigPtrOutput) EnableHttpPortAccess

The flag to enable http access to specific ports on the cluster from external sources (aka Component Gateway). Defaults to false.

func (ClusterClusterConfigEndpointConfigPtrOutput) HttpPorts

func (ClusterClusterConfigEndpointConfigPtrOutput) ToClusterClusterConfigEndpointConfigPtrOutput

func (o ClusterClusterConfigEndpointConfigPtrOutput) ToClusterClusterConfigEndpointConfigPtrOutput() ClusterClusterConfigEndpointConfigPtrOutput

func (ClusterClusterConfigEndpointConfigPtrOutput) ToClusterClusterConfigEndpointConfigPtrOutputWithContext

func (o ClusterClusterConfigEndpointConfigPtrOutput) ToClusterClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigEndpointConfigPtrOutput

type ClusterClusterConfigGceClusterConfig

type ClusterClusterConfigGceClusterConfig struct {
	// By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance. If set to true, all
	// instances in the cluster will only have internal IP addresses. Note: Private Google Access
	// (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster
	// will be launched in.
	InternalIpOnly *bool `pulumi:"internalIpOnly"`
	// A map of the Compute Engine metadata entries to add to all instances
	// (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `pulumi:"metadata"`
	// The name or selfLink of the Google Compute Engine
	// network the cluster will be part of. Conflicts with `subnetwork`.
	// If neither is specified, this defaults to the "default" network.
	Network *string `pulumi:"network"`
	// The service account to be used by the Node VMs.
	// If not specified, the "default" service account is used.
	ServiceAccount *string `pulumi:"serviceAccount"`
	// The set of Google API scopes
	// to be made available on all of the node VMs under the `serviceAccount`
	// specified. Both OAuth2 URLs and gcloud
	// short names are supported. To allow full access to all Cloud APIs, use the
	// `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes).
	ServiceAccountScopes []string `pulumi:"serviceAccountScopes"`
	// Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
	ShieldedInstanceConfig *ClusterClusterConfigGceClusterConfigShieldedInstanceConfig `pulumi:"shieldedInstanceConfig"`
	// The name or selfLink of the Google Compute Engine
	// subnetwork the cluster will be part of. Conflicts with `network`.
	Subnetwork *string `pulumi:"subnetwork"`
	// The list of instance tags applied to instances in the cluster.
	// Tags are used to identify valid sources or targets for network firewalls.
	Tags []string `pulumi:"tags"`
	// The GCP zone where your data is stored and used (i.e. where
	// the master and the worker nodes will be created). If `region` is set to 'global' (default)
	// then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone)
	// to determine this automatically for you.
	// Note: This setting additionally determines and restricts
	// which computing resources are available for use with other configs such as
	// `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.
	Zone *string `pulumi:"zone"`
}

type ClusterClusterConfigGceClusterConfigArgs

type ClusterClusterConfigGceClusterConfigArgs struct {
	// By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance. If set to true, all
	// instances in the cluster will only have internal IP addresses. Note: Private Google Access
	// (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster
	// will be launched in.
	InternalIpOnly pulumi.BoolPtrInput `pulumi:"internalIpOnly"`
	// A map of the Compute Engine metadata entries to add to all instances
	// (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata pulumi.StringMapInput `pulumi:"metadata"`
	// The name or selfLink of the Google Compute Engine
	// network the cluster will be part of. Conflicts with `subnetwork`.
	// If neither is specified, this defaults to the "default" network.
	Network pulumi.StringPtrInput `pulumi:"network"`
	// The service account to be used by the Node VMs.
	// If not specified, the "default" service account is used.
	ServiceAccount pulumi.StringPtrInput `pulumi:"serviceAccount"`
	// The set of Google API scopes
	// to be made available on all of the node VMs under the `serviceAccount`
	// specified. Both OAuth2 URLs and gcloud
	// short names are supported. To allow full access to all Cloud APIs, use the
	// `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes).
	ServiceAccountScopes pulumi.StringArrayInput `pulumi:"serviceAccountScopes"`
	// Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
	ShieldedInstanceConfig ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrInput `pulumi:"shieldedInstanceConfig"`
	// The name or selfLink of the Google Compute Engine
	// subnetwork the cluster will be part of. Conflicts with `network`.
	Subnetwork pulumi.StringPtrInput `pulumi:"subnetwork"`
	// The list of instance tags applied to instances in the cluster.
	// Tags are used to identify valid sources or targets for network firewalls.
	Tags pulumi.StringArrayInput `pulumi:"tags"`
	// The GCP zone where your data is stored and used (i.e. where
	// the master and the worker nodes will be created). If `region` is set to 'global' (default)
	// then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone)
	// to determine this automatically for you.
	// Note: This setting additionally determines and restricts
	// which computing resources are available for use with other configs such as
	// `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.
	Zone pulumi.StringPtrInput `pulumi:"zone"`
}
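
A minimal sketch (placeholder subnetwork and zone) of an internal-IP-only cluster; as noted above, Private Google Access must be enabled on the chosen subnetwork:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// The subnetwork name is a placeholder and must have Private Google
		// Access enabled for an internal-IP-only cluster to work.
		_, err := dataproc.NewCluster(ctx, "private-cluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					Zone:           pulumi.String("us-central1-a"),
					Subnetwork:     pulumi.String("my-private-subnetwork"),
					InternalIpOnly: pulumi.Bool(true),
					ServiceAccountScopes: pulumi.StringArray{
						pulumi.String("cloud-platform"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```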

func (ClusterClusterConfigGceClusterConfigArgs) ElementType

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutput

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutputWithContext

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutput

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (i ClusterClusterConfigGceClusterConfigArgs) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

type ClusterClusterConfigGceClusterConfigInput

type ClusterClusterConfigGceClusterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput
	ToClusterClusterConfigGceClusterConfigOutputWithContext(context.Context) ClusterClusterConfigGceClusterConfigOutput
}

ClusterClusterConfigGceClusterConfigInput is an input type that accepts ClusterClusterConfigGceClusterConfigArgs and ClusterClusterConfigGceClusterConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigGceClusterConfigInput` via:

ClusterClusterConfigGceClusterConfigArgs{...}

type ClusterClusterConfigGceClusterConfigOutput

type ClusterClusterConfigGceClusterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigGceClusterConfigOutput) ElementType

func (ClusterClusterConfigGceClusterConfigOutput) InternalIpOnly

By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster will be launched in.

func (ClusterClusterConfigGceClusterConfigOutput) Metadata

A map of the Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (ClusterClusterConfigGceClusterConfigOutput) Network

The name or selfLink of the Google Compute Engine network the cluster will be part of. Conflicts with `subnetwork`. If neither is specified, this defaults to the "default" network.

func (ClusterClusterConfigGceClusterConfigOutput) ServiceAccount

The service account to be used by the Node VMs. If not specified, the "default" service account is used.

func (ClusterClusterConfigGceClusterConfigOutput) ServiceAccountScopes

The set of Google API scopes to be made available on all of the node VMs under the `serviceAccount` specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes).

func (ClusterClusterConfigGceClusterConfigOutput) ShieldedInstanceConfig added in v5.2.0

Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).

func (ClusterClusterConfigGceClusterConfigOutput) Subnetwork

The name or selfLink of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with `network`.

func (ClusterClusterConfigGceClusterConfigOutput) Tags

The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutput

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutput() ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutputWithContext

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutput

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigGceClusterConfigOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigOutput) Zone

The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created). If `region` is set to 'global' (default) then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone) to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.

type ClusterClusterConfigGceClusterConfigPtrInput

type ClusterClusterConfigGceClusterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput
	ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigGceClusterConfigPtrOutput
}

ClusterClusterConfigGceClusterConfigPtrInput is an input type that accepts ClusterClusterConfigGceClusterConfigArgs, ClusterClusterConfigGceClusterConfigPtr and ClusterClusterConfigGceClusterConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigGceClusterConfigPtrInput` via:

        ClusterClusterConfigGceClusterConfigArgs{...}

or:

        nil

type ClusterClusterConfigGceClusterConfigPtrOutput

type ClusterClusterConfigGceClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigGceClusterConfigPtrOutput) Elem

func (ClusterClusterConfigGceClusterConfigPtrOutput) ElementType

func (ClusterClusterConfigGceClusterConfigPtrOutput) InternalIpOnly

By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. Note: Private Google Access (also known as `privateIpGoogleAccess`) must be enabled on the subnetwork that the cluster will be launched in.

func (ClusterClusterConfigGceClusterConfigPtrOutput) Metadata

A map of the Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (ClusterClusterConfigGceClusterConfigPtrOutput) Network

The name or selfLink of the Google Compute Engine network the cluster will be part of. Conflicts with `subnetwork`. If neither is specified, this defaults to the "default" network.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ServiceAccount

The service account to be used by the Node VMs. If not specified, the "default" service account is used.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ServiceAccountScopes

The set of Google API scopes to be made available on all of the node VMs under the `serviceAccount` specified. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the `cloud-platform` scope. See a complete list of scopes [here](https://cloud.google.com/sdk/gcloud/reference/alpha/compute/instances/set-scopes#--scopes).

func (ClusterClusterConfigGceClusterConfigPtrOutput) ShieldedInstanceConfig added in v5.2.0

Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).

func (ClusterClusterConfigGceClusterConfigPtrOutput) Subnetwork

The name or selfLink of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with `network`.

func (ClusterClusterConfigGceClusterConfigPtrOutput) Tags

The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.

func (ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutput

func (o ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutput() ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigGceClusterConfigPtrOutput) ToClusterClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigPtrOutput

func (ClusterClusterConfigGceClusterConfigPtrOutput) Zone

The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created). If `region` is set to 'global' (default) then `zone` is mandatory, otherwise GCP is able to make use of [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/auto-zone) to determine this automatically for you. Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as `cluster_config.master_config.machine_type` and `cluster_config.worker_config.machine_type`.

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfig added in v5.2.0

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfig struct {
	// Defines whether instances have integrity monitoring enabled.
	EnableIntegrityMonitoring *bool `pulumi:"enableIntegrityMonitoring"`
	// Defines whether instances have Secure Boot enabled.
	EnableSecureBoot *bool `pulumi:"enableSecureBoot"`
	// Defines whether instances have the [vTPM](https://cloud.google.com/security/shielded-cloud/shielded-vm#vtpm) enabled.
	EnableVtpm *bool `pulumi:"enableVtpm"`
}

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs added in v5.2.0

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs struct {
	// Defines whether instances have integrity monitoring enabled.
	EnableIntegrityMonitoring pulumi.BoolPtrInput `pulumi:"enableIntegrityMonitoring"`
	// Defines whether instances have Secure Boot enabled.
	EnableSecureBoot pulumi.BoolPtrInput `pulumi:"enableSecureBoot"`
	// Defines whether instances have the [vTPM](https://cloud.google.com/security/shielded-cloud/shielded-vm#vtpm) enabled.
	EnableVtpm pulumi.BoolPtrInput `pulumi:"enableVtpm"`
}
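
A minimal sketch (placeholder cluster name) enabling Shielded VM features on all cluster instances:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Shielded Instance Config is nested under gceClusterConfig and applies
		// to every VM in the cluster.
		_, err := dataproc.NewCluster(ctx, "shielded", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
					ShieldedInstanceConfig: &dataproc.ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs{
						EnableSecureBoot:          pulumi.Bool(true),
						EnableVtpm:                pulumi.Bool(true),
						EnableIntegrityMonitoring: pulumi.Bool(true),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```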

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs) ElementType added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutputWithContext added in v5.2.0

func (i ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutputWithContext added in v5.2.0

func (i ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigInput added in v5.2.0

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput() ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput
	ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutputWithContext(context.Context) ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput
}

ClusterClusterConfigGceClusterConfigShieldedInstanceConfigInput is an input type that accepts ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs and ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigGceClusterConfigShieldedInstanceConfigInput` via:

ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs{...}

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput added in v5.2.0

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) ElementType added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) EnableIntegrityMonitoring added in v5.2.0

Defines whether instances have integrity monitoring enabled.

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) EnableSecureBoot added in v5.2.0

Defines whether instances have Secure Boot enabled.

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) EnableVtpm added in v5.2.0

Defines whether instances have the [vTPM](https://cloud.google.com/security/shielded-cloud/shielded-vm#vtpm) enabled.

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutputWithContext added in v5.2.0

func (o ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutputWithContext added in v5.2.0

func (o ClusterClusterConfigGceClusterConfigShieldedInstanceConfigOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrInput added in v5.2.0

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput() ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput
	ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutputWithContext(context.Context) ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput
}

ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrInput is an input type that accepts ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs, ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtr and ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrInput` via:

        ClusterClusterConfigGceClusterConfigShieldedInstanceConfigArgs{...}

or:

        nil

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput added in v5.2.0

type ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput) Elem added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput) ElementType added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput) EnableIntegrityMonitoring added in v5.2.0

Defines whether instances have integrity monitoring enabled.

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput) EnableSecureBoot added in v5.2.0

Defines whether instances have Secure Boot enabled.

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput) EnableVtpm added in v5.2.0

Defines whether instances have the [vTPM](https://cloud.google.com/security/shielded-cloud/shielded-vm#vtpm) enabled.

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput added in v5.2.0

func (ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutputWithContext added in v5.2.0

func (o ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput) ToClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigGceClusterConfigShieldedInstanceConfigPtrOutput

type ClusterClusterConfigInitializationAction

type ClusterClusterConfigInitializationAction struct {
	// The script to be executed during initialization of the cluster.
	// The script must be a GCS file with a gs:// prefix.
	Script string `pulumi:"script"`
	// The maximum duration (in seconds) which `script` is
	// allowed to take to execute its action. GCP will default to a predetermined
	// computed value if not set (currently 300).
	TimeoutSec *int `pulumi:"timeoutSec"`
}

type ClusterClusterConfigInitializationActionArgs

type ClusterClusterConfigInitializationActionArgs struct {
	// The script to be executed during initialization of the cluster.
	// The script must be a GCS file with a gs:// prefix.
	Script pulumi.StringInput `pulumi:"script"`
	// The maximum duration (in seconds) which `script` is
	// allowed to take to execute its action. GCP will default to a predetermined
	// computed value if not set (currently 300).
	TimeoutSec pulumi.IntPtrInput `pulumi:"timeoutSec"`
}

func (ClusterClusterConfigInitializationActionArgs) ElementType

func (ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutput

func (i ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput

func (ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutputWithContext

func (i ClusterClusterConfigInitializationActionArgs) ToClusterClusterConfigInitializationActionOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInitializationActionArray

type ClusterClusterConfigInitializationActionArray []ClusterClusterConfigInitializationActionInput

func (ClusterClusterConfigInitializationActionArray) ElementType

func (ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutput

func (i ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput

func (ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutputWithContext

func (i ClusterClusterConfigInitializationActionArray) ToClusterClusterConfigInitializationActionArrayOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionArrayInput

type ClusterClusterConfigInitializationActionArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput
	ToClusterClusterConfigInitializationActionArrayOutputWithContext(context.Context) ClusterClusterConfigInitializationActionArrayOutput
}

ClusterClusterConfigInitializationActionArrayInput is an input type that accepts ClusterClusterConfigInitializationActionArray and ClusterClusterConfigInitializationActionArrayOutput values. You can construct a concrete instance of `ClusterClusterConfigInitializationActionArrayInput` via:

ClusterClusterConfigInitializationActionArray{ ClusterClusterConfigInitializationActionArgs{...} }

type ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigInitializationActionArrayOutput) ElementType

func (ClusterClusterConfigInitializationActionArrayOutput) Index

func (ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutput

func (o ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutput() ClusterClusterConfigInitializationActionArrayOutput

func (ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutputWithContext

func (o ClusterClusterConfigInitializationActionArrayOutput) ToClusterClusterConfigInitializationActionArrayOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionArrayOutput

type ClusterClusterConfigInitializationActionInput

type ClusterClusterConfigInitializationActionInput interface {
	pulumi.Input

	ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput
	ToClusterClusterConfigInitializationActionOutputWithContext(context.Context) ClusterClusterConfigInitializationActionOutput
}

ClusterClusterConfigInitializationActionInput is an input type that accepts ClusterClusterConfigInitializationActionArgs and ClusterClusterConfigInitializationActionOutput values. You can construct a concrete instance of `ClusterClusterConfigInitializationActionInput` via:

ClusterClusterConfigInitializationActionArgs{...}

type ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInitializationActionOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigInitializationActionOutput) ElementType

func (ClusterClusterConfigInitializationActionOutput) Script

The script to be executed during initialization of the cluster. The script must be a GCS file with a gs:// prefix.

func (ClusterClusterConfigInitializationActionOutput) TimeoutSec

The maximum duration (in seconds) which `script` is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).

func (ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutput

func (o ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutput() ClusterClusterConfigInitializationActionOutput

func (ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutputWithContext

func (o ClusterClusterConfigInitializationActionOutput) ToClusterClusterConfigInitializationActionOutputWithContext(ctx context.Context) ClusterClusterConfigInitializationActionOutput

type ClusterClusterConfigInput

type ClusterClusterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigOutput() ClusterClusterConfigOutput
	ToClusterClusterConfigOutputWithContext(context.Context) ClusterClusterConfigOutput
}

ClusterClusterConfigInput is an input type that accepts ClusterClusterConfigArgs and ClusterClusterConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigInput` via:

ClusterClusterConfigArgs{...}

type ClusterClusterConfigLifecycleConfig

type ClusterClusterConfigLifecycleConfig struct {
	// The time when the cluster will be auto-deleted.
	// A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
	// Example: "2014-10-02T15:01:23.045123456Z".
	AutoDeleteTime *string `pulumi:"autoDeleteTime"`
	// The duration to keep the cluster alive while idling
	// (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
	IdleDeleteTtl *string `pulumi:"idleDeleteTtl"`
	IdleStartTime *string `pulumi:"idleStartTime"`
}

type ClusterClusterConfigLifecycleConfigArgs

type ClusterClusterConfigLifecycleConfigArgs struct {
	// The time when the cluster will be auto-deleted.
	// A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
	// Example: "2014-10-02T15:01:23.045123456Z".
	AutoDeleteTime pulumi.StringPtrInput `pulumi:"autoDeleteTime"`
	// The duration to keep the cluster alive while idling
	// (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].
	IdleDeleteTtl pulumi.StringPtrInput `pulumi:"idleDeleteTtl"`
	IdleStartTime pulumi.StringPtrInput `pulumi:"idleStartTime"`
}
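
A hedged example of attaching a lifecycle config so an idle cluster deletes itself; the TTL shown ("600s", i.e. 10 minutes) is illustrative and must stay within the documented [10m, 14d] range.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "lifecycle-example", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				LifecycleConfig: &dataproc.ClusterClusterConfigLifecycleConfigArgs{
					// Delete the cluster after 10 minutes with no running jobs.
					// Illustrative value; the API expects a seconds-denominated duration string.
					IdleDeleteTtl: pulumi.String("600s"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```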

func (ClusterClusterConfigLifecycleConfigArgs) ElementType

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutput

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutputWithContext

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutput

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (i ClusterClusterConfigLifecycleConfigArgs) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigInput

type ClusterClusterConfigLifecycleConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput
	ToClusterClusterConfigLifecycleConfigOutputWithContext(context.Context) ClusterClusterConfigLifecycleConfigOutput
}

ClusterClusterConfigLifecycleConfigInput is an input type that accepts ClusterClusterConfigLifecycleConfigArgs and ClusterClusterConfigLifecycleConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigLifecycleConfigInput` via:

ClusterClusterConfigLifecycleConfigArgs{...}

type ClusterClusterConfigLifecycleConfigOutput

type ClusterClusterConfigLifecycleConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigLifecycleConfigOutput) AutoDeleteTime

The time when the cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".

func (ClusterClusterConfigLifecycleConfigOutput) ElementType

func (ClusterClusterConfigLifecycleConfigOutput) IdleDeleteTtl

The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].

func (ClusterClusterConfigLifecycleConfigOutput) IdleStartTime

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutput

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutput() ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutputWithContext

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutput

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (o ClusterClusterConfigLifecycleConfigOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigPtrInput

type ClusterClusterConfigLifecycleConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput
	ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(context.Context) ClusterClusterConfigLifecycleConfigPtrOutput
}

ClusterClusterConfigLifecycleConfigPtrInput is an input type that accepts ClusterClusterConfigLifecycleConfigArgs, ClusterClusterConfigLifecycleConfigPtr and ClusterClusterConfigLifecycleConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigLifecycleConfigPtrInput` via:

        ClusterClusterConfigLifecycleConfigArgs{...}

or:

        nil

type ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigLifecycleConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigLifecycleConfigPtrOutput) AutoDeleteTime

The time when the cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".

func (ClusterClusterConfigLifecycleConfigPtrOutput) Elem

func (ClusterClusterConfigLifecycleConfigPtrOutput) ElementType

func (ClusterClusterConfigLifecycleConfigPtrOutput) IdleDeleteTtl

The duration to keep the cluster alive while idling (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d].

func (ClusterClusterConfigLifecycleConfigPtrOutput) IdleStartTime

func (ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutput

func (o ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutput() ClusterClusterConfigLifecycleConfigPtrOutput

func (ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext

func (o ClusterClusterConfigLifecycleConfigPtrOutput) ToClusterClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigLifecycleConfigPtrOutput

type ClusterClusterConfigMasterConfig

type ClusterClusterConfigMasterConfig struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators []ClusterClusterConfigMasterConfigAccelerator `pulumi:"accelerators"`
	// Disk Config
	DiskConfig *ClusterClusterConfigMasterConfigDiskConfig `pulumi:"diskConfig"`
	// The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      *string  `pulumi:"imageUri"`
	InstanceNames []string `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the master node(s). If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType *string `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the master. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Specifies the number of master nodes to create.
	// If not specified, GCP will default to a predetermined computed value (currently 1).
	NumInstances *int `pulumi:"numInstances"`
}

type ClusterClusterConfigMasterConfigAccelerator

type ClusterClusterConfigMasterConfigAccelerator struct {
	// The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount int `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType string `pulumi:"acceleratorType"`
}

type ClusterClusterConfigMasterConfigAcceleratorArgs

type ClusterClusterConfigMasterConfigAcceleratorArgs struct {
	// The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount pulumi.IntInput `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringInput `pulumi:"acceleratorType"`
}

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutput

func (i ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput

func (ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext

func (i ClusterClusterConfigMasterConfigAcceleratorArgs) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigAcceleratorArray

type ClusterClusterConfigMasterConfigAcceleratorArray []ClusterClusterConfigMasterConfigAcceleratorInput

func (ClusterClusterConfigMasterConfigAcceleratorArray) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (i ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput() ClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext

func (i ClusterClusterConfigMasterConfigAcceleratorArray) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorArrayInput

type ClusterClusterConfigMasterConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigAcceleratorArrayOutput() ClusterClusterConfigMasterConfigAcceleratorArrayOutput
	ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput
}

ClusterClusterConfigMasterConfigAcceleratorArrayInput is an input type that accepts ClusterClusterConfigMasterConfigAcceleratorArray and ClusterClusterConfigMasterConfigAcceleratorArrayOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigAcceleratorArrayInput` via:

ClusterClusterConfigMasterConfigAcceleratorArray{ ClusterClusterConfigMasterConfigAcceleratorArgs{...} }

type ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) Index

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutput

func (ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext

func (o ClusterClusterConfigMasterConfigAcceleratorArrayOutput) ToClusterClusterConfigMasterConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorArrayOutput

type ClusterClusterConfigMasterConfigAcceleratorInput

type ClusterClusterConfigMasterConfigAcceleratorInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput
	ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput
}

ClusterClusterConfigMasterConfigAcceleratorInput is an input type that accepts ClusterClusterConfigMasterConfigAcceleratorArgs and ClusterClusterConfigMasterConfigAcceleratorOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigAcceleratorInput` via:

ClusterClusterConfigMasterConfigAcceleratorArgs{...}

type ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigAcceleratorOutput) AcceleratorCount

The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.

func (ClusterClusterConfigMasterConfigAcceleratorOutput) AcceleratorType

The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ElementType

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutput

func (o ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutput() ClusterClusterConfigMasterConfigAcceleratorOutput

func (ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext

func (o ClusterClusterConfigMasterConfigAcceleratorOutput) ToClusterClusterConfigMasterConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigAcceleratorOutput

type ClusterClusterConfigMasterConfigArgs

type ClusterClusterConfigMasterConfigArgs struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators ClusterClusterConfigMasterConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Disk Config
	DiskConfig ClusterClusterConfigMasterConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      pulumi.StringPtrInput   `pulumi:"imageUri"`
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the master node(s). If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the master. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Specifies the number of master nodes to create.
	// If not specified, GCP will default to a predetermined computed value (currently 1).
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
}
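
A hedged sketch of a master node configuration using the fields above; the machine type, disk settings, and accelerator are illustrative placeholder values, not recommendations.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "master-config-example", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
					NumInstances: pulumi.Int(1),
					MachineType:  pulumi.String("n1-standard-4"),
					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
						BootDiskType:   pulumi.String("pd-ssd"),
						BootDiskSizeGb: pulumi.Int(100),
					},
					// Optional, illustrative: expose one accelerator card to the master instance.
					Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
						dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
							AcceleratorCount: pulumi.Int(1),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```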

func (ClusterClusterConfigMasterConfigArgs) ElementType

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutput

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutputWithContext

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutput

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (i ClusterClusterConfigMasterConfigArgs) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfig

type ClusterClusterConfigMasterConfigDiskConfig struct {
	// Size of the primary disk attached to each master node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each master node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each master node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigMasterConfigDiskConfigArgs

type ClusterClusterConfigMasterConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each master node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each master node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each master node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutput

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigMasterConfigDiskConfigArgs) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigInput

type ClusterClusterConfigMasterConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput
	ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput
}

ClusterClusterConfigMasterConfigDiskConfigInput is an input type that accepts ClusterClusterConfigMasterConfigDiskConfigArgs and ClusterClusterConfigMasterConfigDiskConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigDiskConfigInput` via:

ClusterClusterConfigMasterConfigDiskConfigArgs{...}

type ClusterClusterConfigMasterConfigDiskConfigOutput

type ClusterClusterConfigMasterConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each master node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each master node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each master node. Defaults to 0.

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutput

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutput() ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigPtrInput

type ClusterClusterConfigMasterConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput
	ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput
}

ClusterClusterConfigMasterConfigDiskConfigPtrInput is an input type that accepts ClusterClusterConfigMasterConfigDiskConfigArgs, ClusterClusterConfigMasterConfigDiskConfigPtr and ClusterClusterConfigMasterConfigDiskConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigDiskConfigPtrInput` via:

        ClusterClusterConfigMasterConfigDiskConfigArgs{...}

or:

        nil

type ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each master node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each master node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each master node. Defaults to 0.

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (o ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutput() ClusterClusterConfigMasterConfigDiskConfigPtrOutput

func (ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigDiskConfigPtrOutput) ToClusterClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigDiskConfigPtrOutput

type ClusterClusterConfigMasterConfigInput

type ClusterClusterConfigMasterConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput
	ToClusterClusterConfigMasterConfigOutputWithContext(context.Context) ClusterClusterConfigMasterConfigOutput
}

ClusterClusterConfigMasterConfigInput is an input type that accepts ClusterClusterConfigMasterConfigArgs and ClusterClusterConfigMasterConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigInput` via:

ClusterClusterConfigMasterConfigArgs{...}

type ClusterClusterConfigMasterConfigOutput

type ClusterClusterConfigMasterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigMasterConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigMasterConfigOutput) ElementType

func (ClusterClusterConfigMasterConfigOutput) ImageUri

The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigMasterConfigOutput) InstanceNames

func (ClusterClusterConfigMasterConfigOutput) MachineType

The name of a Google Compute Engine machine type to create for the master node(s). If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigMasterConfigOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigMasterConfigOutput) NumInstances

Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutput

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutput() ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutputWithContext

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutput

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigPtrInput

type ClusterClusterConfigMasterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput
	ToClusterClusterConfigMasterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigMasterConfigPtrOutput
}

ClusterClusterConfigMasterConfigPtrInput is an input type that accepts ClusterClusterConfigMasterConfigArgs, ClusterClusterConfigMasterConfigPtr and ClusterClusterConfigMasterConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigMasterConfigPtrInput` via:

        ClusterClusterConfigMasterConfigArgs{...}

or:

        nil

type ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMasterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMasterConfigPtrOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigMasterConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigMasterConfigPtrOutput) Elem

func (ClusterClusterConfigMasterConfigPtrOutput) ElementType

func (ClusterClusterConfigMasterConfigPtrOutput) ImageUri

The URI for the image to use for this master instance. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigMasterConfigPtrOutput) InstanceNames

func (ClusterClusterConfigMasterConfigPtrOutput) MachineType

The name of a Google Compute Engine machine type to create for the master node(s). If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigMasterConfigPtrOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the master. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigMasterConfigPtrOutput) NumInstances

Specifies the number of master nodes to create. If not specified, GCP will default to a predetermined computed value (currently 1).

func (ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutput

func (o ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutput() ClusterClusterConfigMasterConfigPtrOutput

func (ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext

func (o ClusterClusterConfigMasterConfigPtrOutput) ToClusterClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMasterConfigPtrOutput

type ClusterClusterConfigMetastoreConfig added in v5.19.0

type ClusterClusterConfigMetastoreConfig struct {
	// Resource name of an existing Dataproc Metastore service.
	DataprocMetastoreService string `pulumi:"dataprocMetastoreService"`
}

type ClusterClusterConfigMetastoreConfigArgs added in v5.19.0

type ClusterClusterConfigMetastoreConfigArgs struct {
	// Resource name of an existing Dataproc Metastore service.
	DataprocMetastoreService pulumi.StringInput `pulumi:"dataprocMetastoreService"`
}
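
A minimal sketch of pointing a cluster at an existing Dataproc Metastore service; the project, location, and service name in the resource name are placeholders.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "metastore-example", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				MetastoreConfig: &dataproc.ClusterClusterConfigMetastoreConfigArgs{
					// Placeholder resource name of an existing Dataproc Metastore service.
					DataprocMetastoreService: pulumi.String("projects/my-project/locations/us-central1/services/my-metastore"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```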

func (ClusterClusterConfigMetastoreConfigArgs) ElementType added in v5.19.0

func (ClusterClusterConfigMetastoreConfigArgs) ToClusterClusterConfigMetastoreConfigOutput added in v5.19.0

func (i ClusterClusterConfigMetastoreConfigArgs) ToClusterClusterConfigMetastoreConfigOutput() ClusterClusterConfigMetastoreConfigOutput

func (ClusterClusterConfigMetastoreConfigArgs) ToClusterClusterConfigMetastoreConfigOutputWithContext added in v5.19.0

func (i ClusterClusterConfigMetastoreConfigArgs) ToClusterClusterConfigMetastoreConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMetastoreConfigOutput

func (ClusterClusterConfigMetastoreConfigArgs) ToClusterClusterConfigMetastoreConfigPtrOutput added in v5.19.0

func (i ClusterClusterConfigMetastoreConfigArgs) ToClusterClusterConfigMetastoreConfigPtrOutput() ClusterClusterConfigMetastoreConfigPtrOutput

func (ClusterClusterConfigMetastoreConfigArgs) ToClusterClusterConfigMetastoreConfigPtrOutputWithContext added in v5.19.0

func (i ClusterClusterConfigMetastoreConfigArgs) ToClusterClusterConfigMetastoreConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMetastoreConfigPtrOutput

type ClusterClusterConfigMetastoreConfigInput added in v5.19.0

type ClusterClusterConfigMetastoreConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigMetastoreConfigOutput() ClusterClusterConfigMetastoreConfigOutput
	ToClusterClusterConfigMetastoreConfigOutputWithContext(context.Context) ClusterClusterConfigMetastoreConfigOutput
}

ClusterClusterConfigMetastoreConfigInput is an input type that accepts ClusterClusterConfigMetastoreConfigArgs and ClusterClusterConfigMetastoreConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigMetastoreConfigInput` via:

ClusterClusterConfigMetastoreConfigArgs{...}

type ClusterClusterConfigMetastoreConfigOutput added in v5.19.0

type ClusterClusterConfigMetastoreConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMetastoreConfigOutput) DataprocMetastoreService added in v5.19.0

func (o ClusterClusterConfigMetastoreConfigOutput) DataprocMetastoreService() pulumi.StringOutput

Resource name of an existing Dataproc Metastore service.

func (ClusterClusterConfigMetastoreConfigOutput) ElementType added in v5.19.0

func (ClusterClusterConfigMetastoreConfigOutput) ToClusterClusterConfigMetastoreConfigOutput added in v5.19.0

func (o ClusterClusterConfigMetastoreConfigOutput) ToClusterClusterConfigMetastoreConfigOutput() ClusterClusterConfigMetastoreConfigOutput

func (ClusterClusterConfigMetastoreConfigOutput) ToClusterClusterConfigMetastoreConfigOutputWithContext added in v5.19.0

func (o ClusterClusterConfigMetastoreConfigOutput) ToClusterClusterConfigMetastoreConfigOutputWithContext(ctx context.Context) ClusterClusterConfigMetastoreConfigOutput

func (ClusterClusterConfigMetastoreConfigOutput) ToClusterClusterConfigMetastoreConfigPtrOutput added in v5.19.0

func (o ClusterClusterConfigMetastoreConfigOutput) ToClusterClusterConfigMetastoreConfigPtrOutput() ClusterClusterConfigMetastoreConfigPtrOutput

func (ClusterClusterConfigMetastoreConfigOutput) ToClusterClusterConfigMetastoreConfigPtrOutputWithContext added in v5.19.0

func (o ClusterClusterConfigMetastoreConfigOutput) ToClusterClusterConfigMetastoreConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMetastoreConfigPtrOutput

type ClusterClusterConfigMetastoreConfigPtrInput added in v5.19.0

type ClusterClusterConfigMetastoreConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigMetastoreConfigPtrOutput() ClusterClusterConfigMetastoreConfigPtrOutput
	ToClusterClusterConfigMetastoreConfigPtrOutputWithContext(context.Context) ClusterClusterConfigMetastoreConfigPtrOutput
}

ClusterClusterConfigMetastoreConfigPtrInput is an input type that accepts ClusterClusterConfigMetastoreConfigArgs, ClusterClusterConfigMetastoreConfigPtr and ClusterClusterConfigMetastoreConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigMetastoreConfigPtrInput` via:

        ClusterClusterConfigMetastoreConfigArgs{...}

or:

        nil

type ClusterClusterConfigMetastoreConfigPtrOutput added in v5.19.0

type ClusterClusterConfigMetastoreConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigMetastoreConfigPtrOutput) DataprocMetastoreService added in v5.19.0

Resource name of an existing Dataproc Metastore service.

func (ClusterClusterConfigMetastoreConfigPtrOutput) Elem added in v5.19.0

func (ClusterClusterConfigMetastoreConfigPtrOutput) ElementType added in v5.19.0

func (ClusterClusterConfigMetastoreConfigPtrOutput) ToClusterClusterConfigMetastoreConfigPtrOutput added in v5.19.0

func (o ClusterClusterConfigMetastoreConfigPtrOutput) ToClusterClusterConfigMetastoreConfigPtrOutput() ClusterClusterConfigMetastoreConfigPtrOutput

func (ClusterClusterConfigMetastoreConfigPtrOutput) ToClusterClusterConfigMetastoreConfigPtrOutputWithContext added in v5.19.0

func (o ClusterClusterConfigMetastoreConfigPtrOutput) ToClusterClusterConfigMetastoreConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigMetastoreConfigPtrOutput

type ClusterClusterConfigOutput

type ClusterClusterConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigOutput) AutoscalingConfig

The autoscaling policy config associated with the cluster. Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can only be removed by setting `policyUri = ""`, rather than removing the whole block. Structure defined below.
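
A hedged sketch of the removal behavior described above: to detach a previously attached policy, keep the `autoscalingConfig` block and clear `policyUri` rather than deleting the block.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Keep the autoscaling block but set PolicyUri to the empty string
		// instead of removing the block from clusterConfig.
		_, err := dataproc.NewCluster(ctx, "detach-autoscaling-example", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
					PolicyUri: pulumi.String(""),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```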

func (ClusterClusterConfigOutput) Bucket

func (ClusterClusterConfigOutput) ElementType

func (ClusterClusterConfigOutput) ElementType() reflect.Type

func (ClusterClusterConfigOutput) EncryptionConfig

The Customer managed encryption keys settings for the cluster. Structure defined below.

func (ClusterClusterConfigOutput) EndpointConfig

The config settings for port access on the cluster. Structure defined below.

func (ClusterClusterConfigOutput) GceClusterConfig

Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.

func (ClusterClusterConfigOutput) InitializationActions

Commands to execute on each node after the cluster config is completed. Multiple initialization actions may be specified. Structure defined below.

func (ClusterClusterConfigOutput) LifecycleConfig

The settings for auto deletion cluster schedule. Structure defined below.

func (ClusterClusterConfigOutput) MasterConfig

The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.

func (ClusterClusterConfigOutput) MetastoreConfig added in v5.19.0

The config settings for the metastore service associated with the cluster. Structure defined below.

func (ClusterClusterConfigOutput) PreemptibleWorkerConfig

The Google Compute Engine config settings for the additional instances in a cluster. Structure defined below.

func (ClusterClusterConfigOutput) SecurityConfig

Security related configuration. Structure defined below.

func (ClusterClusterConfigOutput) SoftwareConfig

The config settings for software inside the cluster. Structure defined below.

func (ClusterClusterConfigOutput) StagingBucket

The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a `stagingBucket` then GCP will auto create / assign one for you. However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.

func (ClusterClusterConfigOutput) TempBucket

The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a `tempBucket` then GCP will auto create / assign one for you.
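
The auto-assigned buckets can be read back from a created cluster's `ClusterConfig` output; a brief sketch (the cluster here is intentionally minimal):

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cluster, err := dataproc.NewCluster(ctx, "buckets-example", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		// Export the staging and temp buckets GCP assigned (or the ones explicitly configured).
		ctx.Export("stagingBucket", cluster.ClusterConfig.StagingBucket())
		ctx.Export("tempBucket", cluster.ClusterConfig.TempBucket())
		return nil
	})
}
```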

func (ClusterClusterConfigOutput) ToClusterClusterConfigOutput

func (o ClusterClusterConfigOutput) ToClusterClusterConfigOutput() ClusterClusterConfigOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigOutputWithContext

func (o ClusterClusterConfigOutput) ToClusterClusterConfigOutputWithContext(ctx context.Context) ClusterClusterConfigOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutput

func (o ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigOutput) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

func (ClusterClusterConfigOutput) WorkerConfig

The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.

type ClusterClusterConfigPreemptibleWorkerConfig

type ClusterClusterConfigPreemptibleWorkerConfig struct {
	// Disk Config
	DiskConfig    *ClusterClusterConfigPreemptibleWorkerConfigDiskConfig `pulumi:"diskConfig"`
	InstanceNames []string                                               `pulumi:"instanceNames"`
	// Specifies the number of preemptible nodes to create.
	// Defaults to 0.
	NumInstances *int `pulumi:"numInstances"`
}

type ClusterClusterConfigPreemptibleWorkerConfigArgs

type ClusterClusterConfigPreemptibleWorkerConfigArgs struct {
	// Disk Config
	DiskConfig    ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	InstanceNames pulumi.StringArrayInput                                       `pulumi:"instanceNames"`
	// Specifies the number of preemptible nodes to create.
	// Defaults to 0.
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
}
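
A hedged sketch of adding preemptible (secondary) workers alongside the primary workers; the instance count and disk settings are illustrative.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "preemptible-example", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
					// Two preemptible workers with modest boot disks.
					NumInstances: pulumi.Int(2),
					DiskConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{
						BootDiskType:   pulumi.String("pd-standard"),
						BootDiskSizeGb: pulumi.Int(50),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```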

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutput

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfig

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfig struct {
	// Size of the primary disk attached to each preemptible worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each preemptible worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each preemptible worker node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each preemptible worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each preemptible worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each preemptible worker node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput
	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput
}

ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput is an input type that accepts ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs and ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigPreemptibleWorkerConfigDiskConfigInput` via:

ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{...}

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each preemptible worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput
	ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput
}

ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput is an input type that accepts ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs, ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtr and ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrInput` via:

        ClusterClusterConfigPreemptibleWorkerConfigDiskConfigArgs{...}

or:

        nil

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each preemptible worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigInput

type ClusterClusterConfigPreemptibleWorkerConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput
	ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput
}

ClusterClusterConfigPreemptibleWorkerConfigInput is an input type that accepts ClusterClusterConfigPreemptibleWorkerConfigArgs and ClusterClusterConfigPreemptibleWorkerConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigPreemptibleWorkerConfigInput` via:

ClusterClusterConfigPreemptibleWorkerConfigArgs{...}

type ClusterClusterConfigPreemptibleWorkerConfigOutput

type ClusterClusterConfigPreemptibleWorkerConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) InstanceNames

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) NumInstances

Specifies the number of preemptible nodes to create. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutput

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutput() ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigPtrInput

type ClusterClusterConfigPreemptibleWorkerConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput() ClusterClusterConfigPreemptibleWorkerConfigPtrOutput
	ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput
}

ClusterClusterConfigPreemptibleWorkerConfigPtrInput is an input type that accepts ClusterClusterConfigPreemptibleWorkerConfigArgs, ClusterClusterConfigPreemptibleWorkerConfigPtr and ClusterClusterConfigPreemptibleWorkerConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigPreemptibleWorkerConfigPtrInput` via:

        ClusterClusterConfigPreemptibleWorkerConfigArgs{...}

or:

        nil

type ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPreemptibleWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) Elem

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ElementType

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) InstanceNames

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) NumInstances

Specifies the number of preemptible nodes to create. Defaults to 0.

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutput

func (ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigPreemptibleWorkerConfigPtrOutput) ToClusterClusterConfigPreemptibleWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPreemptibleWorkerConfigPtrOutput

type ClusterClusterConfigPtrInput

type ClusterClusterConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput
	ToClusterClusterConfigPtrOutputWithContext(context.Context) ClusterClusterConfigPtrOutput
}

ClusterClusterConfigPtrInput is an input type that accepts ClusterClusterConfigArgs, ClusterClusterConfigPtr and ClusterClusterConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigPtrInput` via:

        ClusterClusterConfigArgs{...}

or:

        nil

type ClusterClusterConfigPtrOutput

type ClusterClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigPtrOutput) AutoscalingConfig

The autoscaling policy config associated with the cluster. Note that once set, if `autoscalingConfig` is the only field set in `clusterConfig`, it can only be removed by setting `policyUri = ""`, rather than removing the whole block. Structure defined below.
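
As a hedged illustration of that note, the sketch below keeps the `AutoscalingConfig` block but points it at an empty policy URI to detach a previously attached policy; the resource name and region are placeholders.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Setting PolicyUri to "" detaches the autoscaling policy; simply deleting
		// the AutoscalingConfig block would not remove the association.
		_, err := dataproc.NewCluster(ctx, "basic", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
					PolicyUri: pulumi.String(""),
				},
			},
		})
		return err
	})
}
```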

func (ClusterClusterConfigPtrOutput) Bucket

func (ClusterClusterConfigPtrOutput) Elem

func (ClusterClusterConfigPtrOutput) ElementType

func (ClusterClusterConfigPtrOutput) EncryptionConfig

The customer-managed encryption key settings for the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) EndpointConfig

The config settings for port access on the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) GceClusterConfig

Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) InitializationActions

Commands to execute on each node after config is completed. You can specify multiple versions of these. Structure defined below.

func (ClusterClusterConfigPtrOutput) LifecycleConfig

The settings for auto deletion cluster schedule. Structure defined below.

func (ClusterClusterConfigPtrOutput) MasterConfig

The Google Compute Engine config settings for the master instances in a cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) MetastoreConfig added in v5.19.0

The config settings for the Dataproc Metastore service associated with the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) PreemptibleWorkerConfig

The Google Compute Engine config settings for the additional (preemptible) worker instances in a cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) SecurityConfig

Security related configuration. Structure defined below.

func (ClusterClusterConfigPtrOutput) SoftwareConfig

The config settings for software inside the cluster. Structure defined below.

func (ClusterClusterConfigPtrOutput) StagingBucket

The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a `stagingBucket`, GCP will auto-create/assign one for you. However, you are not guaranteed an auto-generated bucket that is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone that also use the auto-generation option.

func (ClusterClusterConfigPtrOutput) TempBucket

The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a `tempBucket`, GCP will auto-create/assign one for you.
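
If the auto-generated buckets are not acceptable, a sketch along these lines sets both buckets explicitly; the bucket names are placeholders and are assumed to already exist in the cluster's region.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "buckets", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				// Pre-created buckets, so nothing is shared with other clusters.
				StagingBucket: pulumi.String("my-dataproc-staging-bucket"),
				TempBucket:    pulumi.String("my-dataproc-temp-bucket"),
			},
		})
		return err
	})
}
```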

func (ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutput

func (o ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutput() ClusterClusterConfigPtrOutput

func (ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutputWithContext

func (o ClusterClusterConfigPtrOutput) ToClusterClusterConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigPtrOutput

func (ClusterClusterConfigPtrOutput) WorkerConfig

The Google Compute Engine config settings for the worker instances in a cluster. Structure defined below.

type ClusterClusterConfigSecurityConfig

type ClusterClusterConfigSecurityConfig struct {
	// Kerberos Configuration
	KerberosConfig ClusterClusterConfigSecurityConfigKerberosConfig `pulumi:"kerberosConfig"`
}

type ClusterClusterConfigSecurityConfigArgs

type ClusterClusterConfigSecurityConfigArgs struct {
	// Kerberos Configuration
	KerberosConfig ClusterClusterConfigSecurityConfigKerberosConfigInput `pulumi:"kerberosConfig"`
}

func (ClusterClusterConfigSecurityConfigArgs) ElementType

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutput

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutputWithContext

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutput

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (i ClusterClusterConfigSecurityConfigArgs) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigInput

type ClusterClusterConfigSecurityConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput
	ToClusterClusterConfigSecurityConfigOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigOutput
}

ClusterClusterConfigSecurityConfigInput is an input type that accepts ClusterClusterConfigSecurityConfigArgs and ClusterClusterConfigSecurityConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigSecurityConfigInput` via:

ClusterClusterConfigSecurityConfigArgs{...}

type ClusterClusterConfigSecurityConfigKerberosConfig

type ClusterClusterConfigSecurityConfigKerberosConfig struct {
	// The admin server (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer *string `pulumi:"crossRealmTrustAdminServer"`
	// The KDC (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc *string `pulumi:"crossRealmTrustKdc"`
	// The remote realm the Dataproc on-cluster KDC will
	// trust, should the user enable cross realm trust.
	CrossRealmTrustRealm *string `pulumi:"crossRealmTrustRealm"`
	// The Cloud Storage URI of a KMS
	// encrypted file containing the shared password between the on-cluster Kerberos realm
	// and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri *string `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Flag to indicate whether to Kerberize the cluster.
	EnableKerberos *bool `pulumi:"enableKerberos"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the master key of the KDC database.
	KdcDbKeyUri *string `pulumi:"kdcDbKeyUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided key. For the self-signed certificate, this password
	// is generated by Dataproc.
	KeyPasswordUri *string `pulumi:"keyPasswordUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided keystore. For the self-signed certificate, the password
	// is generated by Dataproc.
	KeystorePasswordUri *string `pulumi:"keystorePasswordUri"`
	// The Cloud Storage URI of the keystore file used for SSL encryption.
	// If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri *string `pulumi:"keystoreUri"`
	// The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri string `pulumi:"kmsKeyUri"`
	// The name of the on-cluster Kerberos realm. If not specified, the
	// uppercased domain of hostnames will be the realm.
	Realm *string `pulumi:"realm"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the root principal password.
	RootPrincipalPasswordUri string `pulumi:"rootPrincipalPasswordUri"`
	// The lifetime of the ticket granting ticket, in hours.
	TgtLifetimeHours *int `pulumi:"tgtLifetimeHours"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the password to the user provided truststore. For the self-signed
	// certificate, this password is generated by Dataproc.
	TruststorePasswordUri *string `pulumi:"truststorePasswordUri"`
	// The Cloud Storage URI of the truststore file used for
	// SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri *string `pulumi:"truststoreUri"`
}

type ClusterClusterConfigSecurityConfigKerberosConfigArgs

type ClusterClusterConfigSecurityConfigKerberosConfigArgs struct {
	// The admin server (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer pulumi.StringPtrInput `pulumi:"crossRealmTrustAdminServer"`
	// The KDC (IP or hostname) for the
	// remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc pulumi.StringPtrInput `pulumi:"crossRealmTrustKdc"`
	// The remote realm the Dataproc on-cluster KDC will
	// trust, should the user enable cross realm trust.
	CrossRealmTrustRealm pulumi.StringPtrInput `pulumi:"crossRealmTrustRealm"`
	// The Cloud Storage URI of a KMS
	// encrypted file containing the shared password between the on-cluster Kerberos realm
	// and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri pulumi.StringPtrInput `pulumi:"crossRealmTrustSharedPasswordUri"`
	// Flag to indicate whether to Kerberize the cluster.
	EnableKerberos pulumi.BoolPtrInput `pulumi:"enableKerberos"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the master key of the KDC database.
	KdcDbKeyUri pulumi.StringPtrInput `pulumi:"kdcDbKeyUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided key. For the self-signed certificate, this password
	// is generated by Dataproc.
	KeyPasswordUri pulumi.StringPtrInput `pulumi:"keyPasswordUri"`
	// The Cloud Storage URI of a KMS encrypted file containing
	// the password to the user provided keystore. For the self-signed certificate, the password
	// is generated by Dataproc.
	KeystorePasswordUri pulumi.StringPtrInput `pulumi:"keystorePasswordUri"`
	// The Cloud Storage URI of the keystore file used for SSL encryption.
	// If not provided, Dataproc will provide a self-signed certificate.
	KeystoreUri pulumi.StringPtrInput `pulumi:"keystoreUri"`
	// The URI of the KMS key used to encrypt various sensitive files.
	KmsKeyUri pulumi.StringInput `pulumi:"kmsKeyUri"`
	// The name of the on-cluster Kerberos realm. If not specified, the
	// uppercased domain of hostnames will be the realm.
	Realm pulumi.StringPtrInput `pulumi:"realm"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the root principal password.
	RootPrincipalPasswordUri pulumi.StringInput `pulumi:"rootPrincipalPasswordUri"`
	// The lifetime of the ticket granting ticket, in hours.
	TgtLifetimeHours pulumi.IntPtrInput `pulumi:"tgtLifetimeHours"`
	// The Cloud Storage URI of a KMS encrypted file
	// containing the password to the user provided truststore. For the self-signed
	// certificate, this password is generated by Dataproc.
	TruststorePasswordUri pulumi.StringPtrInput `pulumi:"truststorePasswordUri"`
	// The Cloud Storage URI of the truststore file used for
	// SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	TruststoreUri pulumi.StringPtrInput `pulumi:"truststoreUri"`
}
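
Tying these fields together, here is a hedged sketch of a Kerberized cluster that sets only `enableKerberos` plus the two required URIs; the KMS key and the KMS-encrypted password object are placeholders for resources created outside this snippet.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "kerberized", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				SecurityConfig: &dataproc.ClusterClusterConfigSecurityConfigArgs{
					KerberosConfig: &dataproc.ClusterClusterConfigSecurityConfigKerberosConfigArgs{
						EnableKerberos: pulumi.Bool(true),
						// Placeholder KMS key and KMS-encrypted root principal password.
						KmsKeyUri:                pulumi.String("projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"),
						RootPrincipalPasswordUri: pulumi.String("gs://my-secrets/root-password.encrypted"),
					},
				},
			},
		})
		return err
	})
}
```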

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ElementType

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutput

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutput() ClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput() ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext

func (i ClusterClusterConfigSecurityConfigKerberosConfigArgs) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

type ClusterClusterConfigSecurityConfigKerberosConfigInput

type ClusterClusterConfigSecurityConfigKerberosConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigKerberosConfigOutput() ClusterClusterConfigSecurityConfigKerberosConfigOutput
	ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput
}

ClusterClusterConfigSecurityConfigKerberosConfigInput is an input type that accepts ClusterClusterConfigSecurityConfigKerberosConfigArgs and ClusterClusterConfigSecurityConfigKerberosConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigSecurityConfigKerberosConfigInput` via:

ClusterClusterConfigSecurityConfigKerberosConfigArgs{...}

type ClusterClusterConfigSecurityConfigKerberosConfigOutput

type ClusterClusterConfigSecurityConfigKerberosConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustAdminServer

The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustKdc

The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustRealm

The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustSharedPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ElementType

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) EnableKerberos

Flag to indicate whether to Kerberize the cluster.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KdcDbKeyUri

The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeyPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeystorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KeystoreUri

The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) KmsKeyUri

The URI of the KMS key used to encrypt various sensitive files.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) Realm

The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) RootPrincipalPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TgtLifetimeHours

The lifetime of the ticket granting ticket, in hours.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext

func (o ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (o ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput() ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigKerberosConfigOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TruststorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigOutput) TruststoreUri

The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

type ClusterClusterConfigSecurityConfigKerberosConfigPtrInput

type ClusterClusterConfigSecurityConfigKerberosConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput() ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput
	ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput
}

ClusterClusterConfigSecurityConfigKerberosConfigPtrInput is an input type that accepts ClusterClusterConfigSecurityConfigKerberosConfigArgs, ClusterClusterConfigSecurityConfigKerberosConfigPtr and ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigSecurityConfigKerberosConfigPtrInput` via:

        ClusterClusterConfigSecurityConfigKerberosConfigArgs{...}

or:

        nil

type ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

type ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustAdminServer

The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustKdc

The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustRealm

The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustSharedPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) Elem

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) ElementType

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) EnableKerberos

Flag to indicate whether to Kerberize the cluster.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KdcDbKeyUri

The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KeyPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KeystorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, the password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KeystoreUri

The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) KmsKeyUri

The URI of the KMS key used to encrypt various sensitive files.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) Realm

The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) RootPrincipalPasswordUri

The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) TgtLifetimeHours

The lifetime of the ticket granting ticket, in hours.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) ToClusterClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) TruststorePasswordUri

The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

func (ClusterClusterConfigSecurityConfigKerberosConfigPtrOutput) TruststoreUri

The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

type ClusterClusterConfigSecurityConfigOutput

type ClusterClusterConfigSecurityConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigOutput) ElementType

func (ClusterClusterConfigSecurityConfigOutput) KerberosConfig

Kerberos Configuration

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutput

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutput() ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutputWithContext

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutput

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigPtrInput

type ClusterClusterConfigSecurityConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput
	ToClusterClusterConfigSecurityConfigPtrOutputWithContext(context.Context) ClusterClusterConfigSecurityConfigPtrOutput
}

ClusterClusterConfigSecurityConfigPtrInput is an input type that accepts ClusterClusterConfigSecurityConfigArgs, ClusterClusterConfigSecurityConfigPtr and ClusterClusterConfigSecurityConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigSecurityConfigPtrInput` via:

        ClusterClusterConfigSecurityConfigArgs{...}

or:

        nil

type ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSecurityConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSecurityConfigPtrOutput) Elem

func (ClusterClusterConfigSecurityConfigPtrOutput) ElementType

func (ClusterClusterConfigSecurityConfigPtrOutput) KerberosConfig

Kerberos Configuration

func (ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutput

func (o ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutput() ClusterClusterConfigSecurityConfigPtrOutput

func (ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext

func (o ClusterClusterConfigSecurityConfigPtrOutput) ToClusterClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSecurityConfigPtrOutput

type ClusterClusterConfigSoftwareConfig

type ClusterClusterConfigSoftwareConfig struct {
	// The Cloud Dataproc image version to use
	// for the cluster - this controls the sets of software versions
	// installed onto the nodes when you create clusters. If not specified, defaults to the
	// latest version. For a list of valid versions see
	// [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)
	ImageVersion *string `pulumi:"imageVersion"`
	// The set of optional components to activate on the cluster.
	// Accepted values are:
	// * ANACONDA
	// * DRUID
	// * FLINK
	// * HBASE
	// * HIVE_WEBHCAT
	// * JUPYTER
	// * KERBEROS
	// * PRESTO
	// * RANGER
	// * SOLR
	// * ZEPPELIN
	// * ZOOKEEPER
	OptionalComponents []string `pulumi:"optionalComponents"`
	// A list of override and additional properties (key/value pairs)
	// used to modify various aspects of the common configuration files used when creating
	// a cluster. For a list of valid properties please see
	// [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)
	OverrideProperties map[string]string      `pulumi:"overrideProperties"`
	Properties         map[string]interface{} `pulumi:"properties"`
}

type ClusterClusterConfigSoftwareConfigArgs

type ClusterClusterConfigSoftwareConfigArgs struct {
	// The Cloud Dataproc image version to use
	// for the cluster - this controls the sets of software versions
	// installed onto the nodes when you create clusters. If not specified, defaults to the
	// latest version. For a list of valid versions see
	// [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)
	ImageVersion pulumi.StringPtrInput `pulumi:"imageVersion"`
	// The set of optional components to activate on the cluster.
	// Accepted values are:
	// * ANACONDA
	// * DRUID
	// * FLINK
	// * HBASE
	// * HIVE_WEBHCAT
	// * JUPYTER
	// * KERBEROS
	// * PRESTO
	// * RANGER
	// * SOLR
	// * ZEPPELIN
	// * ZOOKEEPER
	OptionalComponents pulumi.StringArrayInput `pulumi:"optionalComponents"`
	// A list of override and additional properties (key/value pairs)
	// used to modify various aspects of the common configuration files used when creating
	// a cluster. For a list of valid properties please see
	// [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)
	OverrideProperties pulumi.StringMapInput `pulumi:"overrideProperties"`
	Properties         pulumi.MapInput       `pulumi:"properties"`
}
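
For example, a hedged sketch that pins an image version, enables two optional components, and overrides a single cluster property; the version string, component choices, and property key/value are placeholders.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "tuned", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
					ImageVersion: pulumi.String("2.0-debian10"),
					OptionalComponents: pulumi.StringArray{
						pulumi.String("JUPYTER"),
						pulumi.String("ZEPPELIN"),
					},
					// Override a common configuration property.
					OverrideProperties: pulumi.StringMap{
						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
					},
				},
			},
		})
		return err
	})
}
```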

func (ClusterClusterConfigSoftwareConfigArgs) ElementType

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutput

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutputWithContext

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutput

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (i ClusterClusterConfigSoftwareConfigArgs) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigInput

type ClusterClusterConfigSoftwareConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput
	ToClusterClusterConfigSoftwareConfigOutputWithContext(context.Context) ClusterClusterConfigSoftwareConfigOutput
}

ClusterClusterConfigSoftwareConfigInput is an input type that accepts ClusterClusterConfigSoftwareConfigArgs and ClusterClusterConfigSoftwareConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigSoftwareConfigInput` via:

ClusterClusterConfigSoftwareConfigArgs{...}

type ClusterClusterConfigSoftwareConfigOutput

type ClusterClusterConfigSoftwareConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSoftwareConfigOutput) ElementType

func (ClusterClusterConfigSoftwareConfigOutput) ImageVersion

The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)

func (ClusterClusterConfigSoftwareConfigOutput) OptionalComponents

The set of optional components to activate on the cluster. Accepted values are: ANACONDA, DRUID, FLINK, HBASE, HIVE_WEBHCAT, JUPYTER, KERBEROS, PRESTO, RANGER, SOLR, ZEPPELIN, and ZOOKEEPER.

func (ClusterClusterConfigSoftwareConfigOutput) OverrideProperties

A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)

func (ClusterClusterConfigSoftwareConfigOutput) Properties

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutput

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutput() ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutputWithContext

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutput

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (o ClusterClusterConfigSoftwareConfigOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigPtrInput

type ClusterClusterConfigSoftwareConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput
	ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(context.Context) ClusterClusterConfigSoftwareConfigPtrOutput
}

ClusterClusterConfigSoftwareConfigPtrInput is an input type that accepts ClusterClusterConfigSoftwareConfigArgs, ClusterClusterConfigSoftwareConfigPtr and ClusterClusterConfigSoftwareConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigSoftwareConfigPtrInput` via:

        ClusterClusterConfigSoftwareConfigArgs{...}

or:

        nil

type ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigSoftwareConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigSoftwareConfigPtrOutput) Elem

func (ClusterClusterConfigSoftwareConfigPtrOutput) ElementType

func (ClusterClusterConfigSoftwareConfigPtrOutput) ImageVersion

The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. If not specified, defaults to the latest version. For a list of valid versions see [Cloud Dataproc versions](https://cloud.google.com/dataproc/docs/concepts/dataproc-versions)

func (ClusterClusterConfigSoftwareConfigPtrOutput) OptionalComponents

The set of optional components to activate on the cluster. Accepted values are: ANACONDA, DRUID, FLINK, HBASE, HIVE_WEBHCAT, JUPYTER, KERBEROS, PRESTO, RANGER, SOLR, ZEPPELIN, and ZOOKEEPER.

func (ClusterClusterConfigSoftwareConfigPtrOutput) OverrideProperties

A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster. For a list of valid properties please see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties)

func (ClusterClusterConfigSoftwareConfigPtrOutput) Properties

func (ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutput

func (o ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutput() ClusterClusterConfigSoftwareConfigPtrOutput

func (ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext

func (o ClusterClusterConfigSoftwareConfigPtrOutput) ToClusterClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigSoftwareConfigPtrOutput

type ClusterClusterConfigWorkerConfig

type ClusterClusterConfigWorkerConfig struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators []ClusterClusterConfigWorkerConfigAccelerator `pulumi:"accelerators"`
	// Disk Config
	DiskConfig *ClusterClusterConfigWorkerConfigDiskConfig `pulumi:"diskConfig"`
	// The URI for the image to use for this worker.  See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      *string  `pulumi:"imageUri"`
	InstanceNames []string `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the worker nodes. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType *string `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the worker nodes. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Specifies the number of worker nodes to create. If not specified,
	// GCP will default to a predetermined computed value.
	NumInstances *int `pulumi:"numInstances"`
}

type ClusterClusterConfigWorkerConfigAccelerator

type ClusterClusterConfigWorkerConfigAccelerator struct {
	// The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount int `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType string `pulumi:"acceleratorType"`
}

type ClusterClusterConfigWorkerConfigAcceleratorArgs

type ClusterClusterConfigWorkerConfigAcceleratorArgs struct {
	// The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.
	AcceleratorCount pulumi.IntInput `pulumi:"acceleratorCount"`
	// The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringInput `pulumi:"acceleratorType"`
}

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutput

func (i ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext

func (i ClusterClusterConfigWorkerConfigAcceleratorArgs) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigAcceleratorArray

type ClusterClusterConfigWorkerConfigAcceleratorArray []ClusterClusterConfigWorkerConfigAcceleratorInput

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (i ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput() ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext

func (i ClusterClusterConfigWorkerConfigAcceleratorArray) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorArrayInput

type ClusterClusterConfigWorkerConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput() ClusterClusterConfigWorkerConfigAcceleratorArrayOutput
	ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput
}

ClusterClusterConfigWorkerConfigAcceleratorArrayInput is an input type that accepts ClusterClusterConfigWorkerConfigAcceleratorArray and ClusterClusterConfigWorkerConfigAcceleratorArrayOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigAcceleratorArrayInput` via:

ClusterClusterConfigWorkerConfigAcceleratorArray{ ClusterClusterConfigWorkerConfigAcceleratorArgs{...} }

type ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) Index

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutput

func (ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext

func (o ClusterClusterConfigWorkerConfigAcceleratorArrayOutput) ToClusterClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorArrayOutput

type ClusterClusterConfigWorkerConfigAcceleratorInput

type ClusterClusterConfigWorkerConfigAcceleratorInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput
	ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput
}

ClusterClusterConfigWorkerConfigAcceleratorInput is an input type that accepts ClusterClusterConfigWorkerConfigAcceleratorArgs and ClusterClusterConfigWorkerConfigAcceleratorOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigAcceleratorInput` via:

ClusterClusterConfigWorkerConfigAcceleratorArgs{...}

type ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) AcceleratorCount

The number of the accelerator cards of this type exposed to this instance. Often restricted to one of `1`, `2`, `4`, or `8`.

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) AcceleratorType

The short name of the accelerator type to expose to this instance. For example, `nvidia-tesla-k80`.

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ElementType

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutput

func (o ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutput() ClusterClusterConfigWorkerConfigAcceleratorOutput

func (ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext

func (o ClusterClusterConfigWorkerConfigAcceleratorOutput) ToClusterClusterConfigWorkerConfigAcceleratorOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigAcceleratorOutput

type ClusterClusterConfigWorkerConfigArgs

type ClusterClusterConfigWorkerConfigArgs struct {
	// The Compute Engine accelerator configuration for these instances. Can be specified multiple times.
	Accelerators ClusterClusterConfigWorkerConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Disk Config
	DiskConfig ClusterClusterConfigWorkerConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// The URI for the image to use for this worker.  See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images)
	// for more information.
	ImageUri      pulumi.StringPtrInput   `pulumi:"imageUri"`
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// The name of a Google Compute Engine machine type
	// to create for the worker nodes. If not specified, GCP will default to a predetermined
	// computed value (currently `n1-standard-4`).
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// The name of a minimum generation of CPU family
	// for the worker nodes. If not specified, GCP will default to a predetermined computed value
	// for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
	// for details about which CPU families are available (and defaulted) for each zone.
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Specifies the number of worker nodes to create. If not specified,
	// GCP will default to a predetermined computed value.
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
}
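
Putting these fields together, a hedged sketch of a primary worker pool with an explicit machine type, node count, disk layout, and one GPU per node; every value is a placeholder.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewCluster(ctx, "workers", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
					MachineType:  pulumi.String("n1-standard-4"),
					NumInstances: pulumi.Int(3),
					DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
						BootDiskSizeGb: pulumi.Int(100),
						BootDiskType:   pulumi.String("pd-standard"),
						NumLocalSsds:   pulumi.Int(1),
					},
					// One accelerator card of a placeholder type on each worker node.
					Accelerators: dataproc.ClusterClusterConfigWorkerConfigAcceleratorArray{
						&dataproc.ClusterClusterConfigWorkerConfigAcceleratorArgs{
							AcceleratorCount: pulumi.Int(1),
							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
						},
					},
				},
			},
		})
		return err
	})
}
```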

func (ClusterClusterConfigWorkerConfigArgs) ElementType

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutput

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutputWithContext

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutput

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (i ClusterClusterConfigWorkerConfigArgs) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfig

type ClusterClusterConfigWorkerConfigDiskConfig struct {
	// Size of the primary disk attached to each worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType *string `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each worker node. Defaults to 0.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type ClusterClusterConfigWorkerConfigDiskConfigArgs

type ClusterClusterConfigWorkerConfigDiskConfigArgs struct {
	// Size of the primary disk attached to each worker node, specified
	// in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
	// computed value if not set (currently 500GB). Note: If SSDs are not
	// attached, it also contains the HDFS data blocks and Hadoop working directories.
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// The disk type of the primary disk attached to each worker node.
	// One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// The amount of local SSD disks that will be
	// attached to each worker node. Defaults to 0.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutput

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (i ClusterClusterConfigWorkerConfigDiskConfigArgs) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigInput

type ClusterClusterConfigWorkerConfigDiskConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput
	ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput
}

ClusterClusterConfigWorkerConfigDiskConfigInput is an input type that accepts ClusterClusterConfigWorkerConfigDiskConfigArgs and ClusterClusterConfigWorkerConfigDiskConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigDiskConfigInput` via:

ClusterClusterConfigWorkerConfigDiskConfigArgs{...}

type ClusterClusterConfigWorkerConfigDiskConfigOutput

type ClusterClusterConfigWorkerConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) BootDiskSizeGb

Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) BootDiskType

The disk type of the primary disk attached to each worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each worker node. Defaults to 0.

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutput() ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigPtrInput

type ClusterClusterConfigWorkerConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput
	ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput
}

ClusterClusterConfigWorkerConfigDiskConfigPtrInput is an input type that accepts ClusterClusterConfigWorkerConfigDiskConfigArgs, ClusterClusterConfigWorkerConfigDiskConfigPtr and ClusterClusterConfigWorkerConfigDiskConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigDiskConfigPtrInput` via:

        ClusterClusterConfigWorkerConfigDiskConfigArgs{...}

or:

        nil

type ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) BootDiskSizeGb

Size of the primary disk attached to each worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) BootDiskType

The disk type of the primary disk attached to each worker node. One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) Elem

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ElementType

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) NumLocalSsds

The amount of local SSD disks that will be attached to each worker node. Defaults to 0.

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutput() ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

func (ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigDiskConfigPtrOutput) ToClusterClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigDiskConfigPtrOutput

type ClusterClusterConfigWorkerConfigInput

type ClusterClusterConfigWorkerConfigInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput
	ToClusterClusterConfigWorkerConfigOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigOutput
}

ClusterClusterConfigWorkerConfigInput is an input type that accepts ClusterClusterConfigWorkerConfigArgs and ClusterClusterConfigWorkerConfigOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigInput` via:

ClusterClusterConfigWorkerConfigArgs{...}

type ClusterClusterConfigWorkerConfigOutput

type ClusterClusterConfigWorkerConfigOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigWorkerConfigOutput) DiskConfig

Disk Config

func (ClusterClusterConfigWorkerConfigOutput) ElementType

func (ClusterClusterConfigWorkerConfigOutput) ImageUri

The URI for the image to use for this worker. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigWorkerConfigOutput) InstanceNames

func (ClusterClusterConfigWorkerConfigOutput) MachineType

The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigWorkerConfigOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigWorkerConfigOutput) NumInstances

Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value.

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutput

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutput() ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutputWithContext

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigPtrInput

type ClusterClusterConfigWorkerConfigPtrInput interface {
	pulumi.Input

	ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput
	ToClusterClusterConfigWorkerConfigPtrOutputWithContext(context.Context) ClusterClusterConfigWorkerConfigPtrOutput
}

ClusterClusterConfigWorkerConfigPtrInput is an input type that accepts ClusterClusterConfigWorkerConfigArgs, ClusterClusterConfigWorkerConfigPtr and ClusterClusterConfigWorkerConfigPtrOutput values. You can construct a concrete instance of `ClusterClusterConfigWorkerConfigPtrInput` via:

        ClusterClusterConfigWorkerConfigArgs{...}

or:

        nil

type ClusterClusterConfigWorkerConfigPtrOutput

type ClusterClusterConfigWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (ClusterClusterConfigWorkerConfigPtrOutput) Accelerators

The Compute Engine accelerator configuration for these instances. Can be specified multiple times.

func (ClusterClusterConfigWorkerConfigPtrOutput) DiskConfig

Disk Config

func (ClusterClusterConfigWorkerConfigPtrOutput) Elem

func (ClusterClusterConfigWorkerConfigPtrOutput) ElementType

func (ClusterClusterConfigWorkerConfigPtrOutput) ImageUri

The URI for the image to use for this worker. See [the guide](https://cloud.google.com/dataproc/docs/guides/dataproc-images) for more information.

func (ClusterClusterConfigWorkerConfigPtrOutput) InstanceNames

func (ClusterClusterConfigWorkerConfigPtrOutput) MachineType

The name of a Google Compute Engine machine type to create for the worker nodes. If not specified, GCP will default to a predetermined computed value (currently `n1-standard-4`).

func (ClusterClusterConfigWorkerConfigPtrOutput) MinCpuPlatform

The name of a minimum generation of CPU family for the worker nodes. If not specified, GCP will default to a predetermined computed value for each zone. See [the guide](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) for details about which CPU families are available (and defaulted) for each zone.

func (ClusterClusterConfigWorkerConfigPtrOutput) NumInstances

Specifies the number of worker nodes to create. If not specified, GCP will default to a predetermined computed value (currently 2).

func (ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutput

func (o ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutput() ClusterClusterConfigWorkerConfigPtrOutput

func (ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext

func (o ClusterClusterConfigWorkerConfigPtrOutput) ToClusterClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) ClusterClusterConfigWorkerConfigPtrOutput

type ClusterIAMBinding

type ClusterIAMBinding struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringOutput                 `pulumi:"cluster"`
	Condition ClusterIAMBindingConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag    pulumi.StringOutput      `pulumi:"etag"`
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_cluster\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewClusterIAMPolicy(ctx, "editor", &dataproc.ClusterIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			Cluster:    pulumi.String("your-dataproc-cluster"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMBinding(ctx, "editor", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMMember(ctx, "editor", &dataproc.ClusterIAMMemberArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Member:  pulumi.String("user:jane@example.com"),
			Role:    pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Cluster IAM resources can be imported using the project, region, cluster name, role and/or member.

```sh

$ pulumi import gcp:dataproc/clusterIAMBinding:ClusterIAMBinding editor "projects/{project}/regions/{region}/clusters/{cluster}"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMBinding:ClusterIAMBinding editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMBinding:ClusterIAMBinding editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor user:jane@example.com"

```

-> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetClusterIAMBinding

func GetClusterIAMBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMBindingState, opts ...pulumi.ResourceOption) (*ClusterIAMBinding, error)

GetClusterIAMBinding gets an existing ClusterIAMBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
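
As an illustrative sketch (not one of the generated examples), an existing binding can be read back into a program by its ID; the ID string below follows the format shown in the Import section and all values are placeholders:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Looks up the binding's state without creating anything new.
		binding, err := dataproc.GetClusterIAMBinding(ctx, "editor",
			pulumi.ID("projects/your-project/regions/your-region/clusters/your-dataproc-cluster roles/editor"),
			nil)
		if err != nil {
			return err
		}
		ctx.Export("bindingEtag", binding.Etag)
		return nil
	})
}

```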

func NewClusterIAMBinding

func NewClusterIAMBinding(ctx *pulumi.Context,
	name string, args *ClusterIAMBindingArgs, opts ...pulumi.ResourceOption) (*ClusterIAMBinding, error)

NewClusterIAMBinding registers a new resource with the given unique name, arguments, and options.

func (*ClusterIAMBinding) ElementType

func (*ClusterIAMBinding) ElementType() reflect.Type

func (*ClusterIAMBinding) ToClusterIAMBindingOutput

func (i *ClusterIAMBinding) ToClusterIAMBindingOutput() ClusterIAMBindingOutput

func (*ClusterIAMBinding) ToClusterIAMBindingOutputWithContext

func (i *ClusterIAMBinding) ToClusterIAMBindingOutputWithContext(ctx context.Context) ClusterIAMBindingOutput

func (*ClusterIAMBinding) ToClusterIAMBindingPtrOutput

func (i *ClusterIAMBinding) ToClusterIAMBindingPtrOutput() ClusterIAMBindingPtrOutput

func (*ClusterIAMBinding) ToClusterIAMBindingPtrOutputWithContext

func (i *ClusterIAMBinding) ToClusterIAMBindingPtrOutputWithContext(ctx context.Context) ClusterIAMBindingPtrOutput

type ClusterIAMBindingArgs

type ClusterIAMBindingArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringInput
	Condition ClusterIAMBindingConditionPtrInput
	Members   pulumi.StringArrayInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a ClusterIAMBinding resource.

func (ClusterIAMBindingArgs) ElementType

func (ClusterIAMBindingArgs) ElementType() reflect.Type

type ClusterIAMBindingArray

type ClusterIAMBindingArray []ClusterIAMBindingInput

func (ClusterIAMBindingArray) ElementType

func (ClusterIAMBindingArray) ElementType() reflect.Type

func (ClusterIAMBindingArray) ToClusterIAMBindingArrayOutput

func (i ClusterIAMBindingArray) ToClusterIAMBindingArrayOutput() ClusterIAMBindingArrayOutput

func (ClusterIAMBindingArray) ToClusterIAMBindingArrayOutputWithContext

func (i ClusterIAMBindingArray) ToClusterIAMBindingArrayOutputWithContext(ctx context.Context) ClusterIAMBindingArrayOutput

type ClusterIAMBindingArrayInput

type ClusterIAMBindingArrayInput interface {
	pulumi.Input

	ToClusterIAMBindingArrayOutput() ClusterIAMBindingArrayOutput
	ToClusterIAMBindingArrayOutputWithContext(context.Context) ClusterIAMBindingArrayOutput
}

ClusterIAMBindingArrayInput is an input type that accepts ClusterIAMBindingArray and ClusterIAMBindingArrayOutput values. You can construct a concrete instance of `ClusterIAMBindingArrayInput` via:

ClusterIAMBindingArray{ ClusterIAMBindingArgs{...} }

type ClusterIAMBindingArrayOutput

type ClusterIAMBindingArrayOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingArrayOutput) ElementType

func (ClusterIAMBindingArrayOutput) Index

func (ClusterIAMBindingArrayOutput) ToClusterIAMBindingArrayOutput

func (o ClusterIAMBindingArrayOutput) ToClusterIAMBindingArrayOutput() ClusterIAMBindingArrayOutput

func (ClusterIAMBindingArrayOutput) ToClusterIAMBindingArrayOutputWithContext

func (o ClusterIAMBindingArrayOutput) ToClusterIAMBindingArrayOutputWithContext(ctx context.Context) ClusterIAMBindingArrayOutput

type ClusterIAMBindingCondition

type ClusterIAMBindingCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type ClusterIAMBindingConditionArgs

type ClusterIAMBindingConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}
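
A condition restricts when a binding applies. A hedged sketch follows; the cluster name, condition title, and CEL expression are illustrative placeholders, and whether a given role accepts conditions is determined by GCP IAM rather than by this SDK:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grants roles/editor on the cluster, but only until the given timestamp.
		_, err := dataproc.NewClusterIAMBinding(ctx, "editor", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Role:    pulumi.String("roles/editor"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Condition: &dataproc.ClusterIAMBindingConditionArgs{
				Title:      pulumi.String("expires-2025"),
				Expression: pulumi.String("request.time < timestamp(\"2025-01-01T00:00:00Z\")"),
			},
		})
		return err
	})
}

```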

func (ClusterIAMBindingConditionArgs) ElementType

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutput

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutputWithContext

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionOutputWithContext(ctx context.Context) ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutput

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutputWithContext

func (i ClusterIAMBindingConditionArgs) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionInput

type ClusterIAMBindingConditionInput interface {
	pulumi.Input

	ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput
	ToClusterIAMBindingConditionOutputWithContext(context.Context) ClusterIAMBindingConditionOutput
}

ClusterIAMBindingConditionInput is an input type that accepts ClusterIAMBindingConditionArgs and ClusterIAMBindingConditionOutput values. You can construct a concrete instance of `ClusterIAMBindingConditionInput` via:

ClusterIAMBindingConditionArgs{...}

type ClusterIAMBindingConditionOutput

type ClusterIAMBindingConditionOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingConditionOutput) Description

func (ClusterIAMBindingConditionOutput) ElementType

func (ClusterIAMBindingConditionOutput) Expression

func (ClusterIAMBindingConditionOutput) Title

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutput

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutput() ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutputWithContext

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionOutputWithContext(ctx context.Context) ClusterIAMBindingConditionOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutput

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutputWithContext

func (o ClusterIAMBindingConditionOutput) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionPtrInput

type ClusterIAMBindingConditionPtrInput interface {
	pulumi.Input

	ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput
	ToClusterIAMBindingConditionPtrOutputWithContext(context.Context) ClusterIAMBindingConditionPtrOutput
}

ClusterIAMBindingConditionPtrInput is an input type that accepts ClusterIAMBindingConditionArgs, ClusterIAMBindingConditionPtr and ClusterIAMBindingConditionPtrOutput values. You can construct a concrete instance of `ClusterIAMBindingConditionPtrInput` via:

        ClusterIAMBindingConditionArgs{...}

or:

        nil

type ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingConditionPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingConditionPtrOutput) Description

func (ClusterIAMBindingConditionPtrOutput) Elem

func (ClusterIAMBindingConditionPtrOutput) ElementType

func (ClusterIAMBindingConditionPtrOutput) Expression

func (ClusterIAMBindingConditionPtrOutput) Title

func (ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutput

func (o ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutput() ClusterIAMBindingConditionPtrOutput

func (ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutputWithContext

func (o ClusterIAMBindingConditionPtrOutput) ToClusterIAMBindingConditionPtrOutputWithContext(ctx context.Context) ClusterIAMBindingConditionPtrOutput

type ClusterIAMBindingInput

type ClusterIAMBindingInput interface {
	pulumi.Input

	ToClusterIAMBindingOutput() ClusterIAMBindingOutput
	ToClusterIAMBindingOutputWithContext(ctx context.Context) ClusterIAMBindingOutput
}

type ClusterIAMBindingMap

type ClusterIAMBindingMap map[string]ClusterIAMBindingInput

func (ClusterIAMBindingMap) ElementType

func (ClusterIAMBindingMap) ElementType() reflect.Type

func (ClusterIAMBindingMap) ToClusterIAMBindingMapOutput

func (i ClusterIAMBindingMap) ToClusterIAMBindingMapOutput() ClusterIAMBindingMapOutput

func (ClusterIAMBindingMap) ToClusterIAMBindingMapOutputWithContext

func (i ClusterIAMBindingMap) ToClusterIAMBindingMapOutputWithContext(ctx context.Context) ClusterIAMBindingMapOutput

type ClusterIAMBindingMapInput

type ClusterIAMBindingMapInput interface {
	pulumi.Input

	ToClusterIAMBindingMapOutput() ClusterIAMBindingMapOutput
	ToClusterIAMBindingMapOutputWithContext(context.Context) ClusterIAMBindingMapOutput
}

ClusterIAMBindingMapInput is an input type that accepts ClusterIAMBindingMap and ClusterIAMBindingMapOutput values. You can construct a concrete instance of `ClusterIAMBindingMapInput` via:

ClusterIAMBindingMap{ "key": ClusterIAMBindingArgs{...} }

type ClusterIAMBindingMapOutput

type ClusterIAMBindingMapOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingMapOutput) ElementType

func (ClusterIAMBindingMapOutput) ElementType() reflect.Type

func (ClusterIAMBindingMapOutput) MapIndex

func (ClusterIAMBindingMapOutput) ToClusterIAMBindingMapOutput

func (o ClusterIAMBindingMapOutput) ToClusterIAMBindingMapOutput() ClusterIAMBindingMapOutput

func (ClusterIAMBindingMapOutput) ToClusterIAMBindingMapOutputWithContext

func (o ClusterIAMBindingMapOutput) ToClusterIAMBindingMapOutputWithContext(ctx context.Context) ClusterIAMBindingMapOutput

type ClusterIAMBindingOutput

type ClusterIAMBindingOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingOutput) ElementType

func (ClusterIAMBindingOutput) ElementType() reflect.Type

func (ClusterIAMBindingOutput) ToClusterIAMBindingOutput

func (o ClusterIAMBindingOutput) ToClusterIAMBindingOutput() ClusterIAMBindingOutput

func (ClusterIAMBindingOutput) ToClusterIAMBindingOutputWithContext

func (o ClusterIAMBindingOutput) ToClusterIAMBindingOutputWithContext(ctx context.Context) ClusterIAMBindingOutput

func (ClusterIAMBindingOutput) ToClusterIAMBindingPtrOutput

func (o ClusterIAMBindingOutput) ToClusterIAMBindingPtrOutput() ClusterIAMBindingPtrOutput

func (ClusterIAMBindingOutput) ToClusterIAMBindingPtrOutputWithContext

func (o ClusterIAMBindingOutput) ToClusterIAMBindingPtrOutputWithContext(ctx context.Context) ClusterIAMBindingPtrOutput

type ClusterIAMBindingPtrInput

type ClusterIAMBindingPtrInput interface {
	pulumi.Input

	ToClusterIAMBindingPtrOutput() ClusterIAMBindingPtrOutput
	ToClusterIAMBindingPtrOutputWithContext(ctx context.Context) ClusterIAMBindingPtrOutput
}

type ClusterIAMBindingPtrOutput

type ClusterIAMBindingPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMBindingPtrOutput) Elem added in v5.21.0

func (ClusterIAMBindingPtrOutput) ElementType

func (ClusterIAMBindingPtrOutput) ElementType() reflect.Type

func (ClusterIAMBindingPtrOutput) ToClusterIAMBindingPtrOutput

func (o ClusterIAMBindingPtrOutput) ToClusterIAMBindingPtrOutput() ClusterIAMBindingPtrOutput

func (ClusterIAMBindingPtrOutput) ToClusterIAMBindingPtrOutputWithContext

func (o ClusterIAMBindingPtrOutput) ToClusterIAMBindingPtrOutputWithContext(ctx context.Context) ClusterIAMBindingPtrOutput

type ClusterIAMBindingState

type ClusterIAMBindingState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringPtrInput
	Condition ClusterIAMBindingConditionPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag    pulumi.StringPtrInput
	Members pulumi.StringArrayInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (ClusterIAMBindingState) ElementType

func (ClusterIAMBindingState) ElementType() reflect.Type

type ClusterIAMMember

type ClusterIAMMember struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringOutput                `pulumi:"cluster"`
	Condition ClusterIAMMemberConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag   pulumi.StringOutput `pulumi:"etag"`
	Member pulumi.StringOutput `pulumi:"member"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_cluster\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewClusterIAMPolicy(ctx, "editor", &dataproc.ClusterIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			Cluster:    pulumi.String("your-dataproc-cluster"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMBinding(ctx, "editor", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMMember(ctx, "editor", &dataproc.ClusterIAMMemberArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Member:  pulumi.String("user:jane@example.com"),
			Role:    pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Cluster IAM resources can be imported using the project, region, cluster name, role and/or member.

```sh

$ pulumi import gcp:dataproc/clusterIAMMember:ClusterIAMMember editor "projects/{project}/regions/{region}/clusters/{cluster}"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMMember:ClusterIAMMember editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMMember:ClusterIAMMember editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor user:jane@example.com"

```

-> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetClusterIAMMember

func GetClusterIAMMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMMemberState, opts ...pulumi.ResourceOption) (*ClusterIAMMember, error)

GetClusterIAMMember gets an existing ClusterIAMMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewClusterIAMMember

func NewClusterIAMMember(ctx *pulumi.Context,
	name string, args *ClusterIAMMemberArgs, opts ...pulumi.ResourceOption) (*ClusterIAMMember, error)

NewClusterIAMMember registers a new resource with the given unique name, arguments, and options.

func (*ClusterIAMMember) ElementType

func (*ClusterIAMMember) ElementType() reflect.Type

func (*ClusterIAMMember) ToClusterIAMMemberOutput

func (i *ClusterIAMMember) ToClusterIAMMemberOutput() ClusterIAMMemberOutput

func (*ClusterIAMMember) ToClusterIAMMemberOutputWithContext

func (i *ClusterIAMMember) ToClusterIAMMemberOutputWithContext(ctx context.Context) ClusterIAMMemberOutput

func (*ClusterIAMMember) ToClusterIAMMemberPtrOutput

func (i *ClusterIAMMember) ToClusterIAMMemberPtrOutput() ClusterIAMMemberPtrOutput

func (*ClusterIAMMember) ToClusterIAMMemberPtrOutputWithContext

func (i *ClusterIAMMember) ToClusterIAMMemberPtrOutputWithContext(ctx context.Context) ClusterIAMMemberPtrOutput

type ClusterIAMMemberArgs

type ClusterIAMMemberArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringInput
	Condition ClusterIAMMemberConditionPtrInput
	Member    pulumi.StringInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a ClusterIAMMember resource.

func (ClusterIAMMemberArgs) ElementType

func (ClusterIAMMemberArgs) ElementType() reflect.Type

type ClusterIAMMemberArray

type ClusterIAMMemberArray []ClusterIAMMemberInput

func (ClusterIAMMemberArray) ElementType

func (ClusterIAMMemberArray) ElementType() reflect.Type

func (ClusterIAMMemberArray) ToClusterIAMMemberArrayOutput

func (i ClusterIAMMemberArray) ToClusterIAMMemberArrayOutput() ClusterIAMMemberArrayOutput

func (ClusterIAMMemberArray) ToClusterIAMMemberArrayOutputWithContext

func (i ClusterIAMMemberArray) ToClusterIAMMemberArrayOutputWithContext(ctx context.Context) ClusterIAMMemberArrayOutput

type ClusterIAMMemberArrayInput

type ClusterIAMMemberArrayInput interface {
	pulumi.Input

	ToClusterIAMMemberArrayOutput() ClusterIAMMemberArrayOutput
	ToClusterIAMMemberArrayOutputWithContext(context.Context) ClusterIAMMemberArrayOutput
}

ClusterIAMMemberArrayInput is an input type that accepts ClusterIAMMemberArray and ClusterIAMMemberArrayOutput values. You can construct a concrete instance of `ClusterIAMMemberArrayInput` via:

ClusterIAMMemberArray{ ClusterIAMMemberArgs{...} }

type ClusterIAMMemberArrayOutput

type ClusterIAMMemberArrayOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberArrayOutput) ElementType

func (ClusterIAMMemberArrayOutput) Index

func (ClusterIAMMemberArrayOutput) ToClusterIAMMemberArrayOutput

func (o ClusterIAMMemberArrayOutput) ToClusterIAMMemberArrayOutput() ClusterIAMMemberArrayOutput

func (ClusterIAMMemberArrayOutput) ToClusterIAMMemberArrayOutputWithContext

func (o ClusterIAMMemberArrayOutput) ToClusterIAMMemberArrayOutputWithContext(ctx context.Context) ClusterIAMMemberArrayOutput

type ClusterIAMMemberCondition

type ClusterIAMMemberCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type ClusterIAMMemberConditionArgs

type ClusterIAMMemberConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (ClusterIAMMemberConditionArgs) ElementType

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutput

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutputWithContext

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionOutputWithContext(ctx context.Context) ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutput

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutputWithContext

func (i ClusterIAMMemberConditionArgs) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionInput

type ClusterIAMMemberConditionInput interface {
	pulumi.Input

	ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput
	ToClusterIAMMemberConditionOutputWithContext(context.Context) ClusterIAMMemberConditionOutput
}

ClusterIAMMemberConditionInput is an input type that accepts ClusterIAMMemberConditionArgs and ClusterIAMMemberConditionOutput values. You can construct a concrete instance of `ClusterIAMMemberConditionInput` via:

ClusterIAMMemberConditionArgs{...}

type ClusterIAMMemberConditionOutput

type ClusterIAMMemberConditionOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberConditionOutput) Description

func (ClusterIAMMemberConditionOutput) ElementType

func (ClusterIAMMemberConditionOutput) Expression

func (ClusterIAMMemberConditionOutput) Title

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutput

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutput() ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutputWithContext

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionOutputWithContext(ctx context.Context) ClusterIAMMemberConditionOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutput

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutputWithContext

func (o ClusterIAMMemberConditionOutput) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionPtrInput

type ClusterIAMMemberConditionPtrInput interface {
	pulumi.Input

	ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput
	ToClusterIAMMemberConditionPtrOutputWithContext(context.Context) ClusterIAMMemberConditionPtrOutput
}

ClusterIAMMemberConditionPtrInput is an input type that accepts ClusterIAMMemberConditionArgs, ClusterIAMMemberConditionPtr and ClusterIAMMemberConditionPtrOutput values. You can construct a concrete instance of `ClusterIAMMemberConditionPtrInput` via:

        ClusterIAMMemberConditionArgs{...}

or:

        nil

type ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberConditionPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberConditionPtrOutput) Description

func (ClusterIAMMemberConditionPtrOutput) Elem

func (ClusterIAMMemberConditionPtrOutput) ElementType

func (ClusterIAMMemberConditionPtrOutput) Expression

func (ClusterIAMMemberConditionPtrOutput) Title

func (ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutput

func (o ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutput() ClusterIAMMemberConditionPtrOutput

func (ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutputWithContext

func (o ClusterIAMMemberConditionPtrOutput) ToClusterIAMMemberConditionPtrOutputWithContext(ctx context.Context) ClusterIAMMemberConditionPtrOutput

type ClusterIAMMemberInput

type ClusterIAMMemberInput interface {
	pulumi.Input

	ToClusterIAMMemberOutput() ClusterIAMMemberOutput
	ToClusterIAMMemberOutputWithContext(ctx context.Context) ClusterIAMMemberOutput
}

type ClusterIAMMemberMap

type ClusterIAMMemberMap map[string]ClusterIAMMemberInput

func (ClusterIAMMemberMap) ElementType

func (ClusterIAMMemberMap) ElementType() reflect.Type

func (ClusterIAMMemberMap) ToClusterIAMMemberMapOutput

func (i ClusterIAMMemberMap) ToClusterIAMMemberMapOutput() ClusterIAMMemberMapOutput

func (ClusterIAMMemberMap) ToClusterIAMMemberMapOutputWithContext

func (i ClusterIAMMemberMap) ToClusterIAMMemberMapOutputWithContext(ctx context.Context) ClusterIAMMemberMapOutput

type ClusterIAMMemberMapInput

type ClusterIAMMemberMapInput interface {
	pulumi.Input

	ToClusterIAMMemberMapOutput() ClusterIAMMemberMapOutput
	ToClusterIAMMemberMapOutputWithContext(context.Context) ClusterIAMMemberMapOutput
}

ClusterIAMMemberMapInput is an input type that accepts ClusterIAMMemberMap and ClusterIAMMemberMapOutput values. You can construct a concrete instance of `ClusterIAMMemberMapInput` via:

ClusterIAMMemberMap{ "key": ClusterIAMMemberArgs{...} }

type ClusterIAMMemberMapOutput

type ClusterIAMMemberMapOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberMapOutput) ElementType

func (ClusterIAMMemberMapOutput) ElementType() reflect.Type

func (ClusterIAMMemberMapOutput) MapIndex

func (ClusterIAMMemberMapOutput) ToClusterIAMMemberMapOutput

func (o ClusterIAMMemberMapOutput) ToClusterIAMMemberMapOutput() ClusterIAMMemberMapOutput

func (ClusterIAMMemberMapOutput) ToClusterIAMMemberMapOutputWithContext

func (o ClusterIAMMemberMapOutput) ToClusterIAMMemberMapOutputWithContext(ctx context.Context) ClusterIAMMemberMapOutput

type ClusterIAMMemberOutput

type ClusterIAMMemberOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberOutput) ElementType

func (ClusterIAMMemberOutput) ElementType() reflect.Type

func (ClusterIAMMemberOutput) ToClusterIAMMemberOutput

func (o ClusterIAMMemberOutput) ToClusterIAMMemberOutput() ClusterIAMMemberOutput

func (ClusterIAMMemberOutput) ToClusterIAMMemberOutputWithContext

func (o ClusterIAMMemberOutput) ToClusterIAMMemberOutputWithContext(ctx context.Context) ClusterIAMMemberOutput

func (ClusterIAMMemberOutput) ToClusterIAMMemberPtrOutput

func (o ClusterIAMMemberOutput) ToClusterIAMMemberPtrOutput() ClusterIAMMemberPtrOutput

func (ClusterIAMMemberOutput) ToClusterIAMMemberPtrOutputWithContext

func (o ClusterIAMMemberOutput) ToClusterIAMMemberPtrOutputWithContext(ctx context.Context) ClusterIAMMemberPtrOutput

type ClusterIAMMemberPtrInput

type ClusterIAMMemberPtrInput interface {
	pulumi.Input

	ToClusterIAMMemberPtrOutput() ClusterIAMMemberPtrOutput
	ToClusterIAMMemberPtrOutputWithContext(ctx context.Context) ClusterIAMMemberPtrOutput
}

type ClusterIAMMemberPtrOutput

type ClusterIAMMemberPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMMemberPtrOutput) Elem added in v5.21.0

func (ClusterIAMMemberPtrOutput) ElementType

func (ClusterIAMMemberPtrOutput) ElementType() reflect.Type

func (ClusterIAMMemberPtrOutput) ToClusterIAMMemberPtrOutput

func (o ClusterIAMMemberPtrOutput) ToClusterIAMMemberPtrOutput() ClusterIAMMemberPtrOutput

func (ClusterIAMMemberPtrOutput) ToClusterIAMMemberPtrOutputWithContext

func (o ClusterIAMMemberPtrOutput) ToClusterIAMMemberPtrOutputWithContext(ctx context.Context) ClusterIAMMemberPtrOutput

type ClusterIAMMemberState

type ClusterIAMMemberState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster   pulumi.StringPtrInput
	Condition ClusterIAMMemberConditionPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag   pulumi.StringPtrInput
	Member pulumi.StringPtrInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.ClusterIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (ClusterIAMMemberState) ElementType

func (ClusterIAMMemberState) ElementType() reflect.Type

type ClusterIAMPolicy

type ClusterIAMPolicy struct {
	pulumi.CustomResourceState

	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringOutput `pulumi:"cluster"`
	// (Computed) The etag of the cluster's IAM policy.
	Etag pulumi.StringOutput `pulumi:"etag"`
	// The policy data generated by an `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringOutput `pulumi:"policyData"`
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
}

Three different resources help you manage IAM policies on dataproc clusters. Each of these resources serves a different use case:

* `dataproc.ClusterIAMPolicy`: Authoritative. Sets the IAM policy for the cluster and replaces any existing policy already attached.
* `dataproc.ClusterIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the cluster are preserved.
* `dataproc.ClusterIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the cluster are preserved.

> **Note:** `dataproc.ClusterIAMPolicy` **cannot** be used in conjunction with `dataproc.ClusterIAMBinding` and `dataproc.ClusterIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the cluster as `dataproc.ClusterIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.ClusterIAMBinding` resources **can be** used in conjunction with `dataproc.ClusterIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_cluster\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewClusterIAMPolicy(ctx, "editor", &dataproc.ClusterIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			Cluster:    pulumi.String("your-dataproc-cluster"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMBinding(ctx, "editor", &dataproc.ClusterIAMBindingArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_cluster\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewClusterIAMMember(ctx, "editor", &dataproc.ClusterIAMMemberArgs{
			Cluster: pulumi.String("your-dataproc-cluster"),
			Member:  pulumi.String("user:jane@example.com"),
			Role:    pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Cluster IAM resources can be imported using the project, region, cluster name, role and/or member.

```sh

$ pulumi import gcp:dataproc/clusterIAMPolicy:ClusterIAMPolicy editor "projects/{project}/regions/{region}/clusters/{cluster}"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMPolicy:ClusterIAMPolicy editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/clusterIAMPolicy:ClusterIAMPolicy editor "projects/{project}/regions/{region}/clusters/{cluster} roles/editor user:jane@example.com"

```

-> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetClusterIAMPolicy

func GetClusterIAMPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *ClusterIAMPolicyState, opts ...pulumi.ResourceOption) (*ClusterIAMPolicy, error)

GetClusterIAMPolicy gets an existing ClusterIAMPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewClusterIAMPolicy

func NewClusterIAMPolicy(ctx *pulumi.Context,
	name string, args *ClusterIAMPolicyArgs, opts ...pulumi.ResourceOption) (*ClusterIAMPolicy, error)

NewClusterIAMPolicy registers a new resource with the given unique name, arguments, and options.

func (*ClusterIAMPolicy) ElementType

func (*ClusterIAMPolicy) ElementType() reflect.Type

func (*ClusterIAMPolicy) ToClusterIAMPolicyOutput

func (i *ClusterIAMPolicy) ToClusterIAMPolicyOutput() ClusterIAMPolicyOutput

func (*ClusterIAMPolicy) ToClusterIAMPolicyOutputWithContext

func (i *ClusterIAMPolicy) ToClusterIAMPolicyOutputWithContext(ctx context.Context) ClusterIAMPolicyOutput

func (*ClusterIAMPolicy) ToClusterIAMPolicyPtrOutput

func (i *ClusterIAMPolicy) ToClusterIAMPolicyPtrOutput() ClusterIAMPolicyPtrOutput

func (*ClusterIAMPolicy) ToClusterIAMPolicyPtrOutputWithContext

func (i *ClusterIAMPolicy) ToClusterIAMPolicyPtrOutputWithContext(ctx context.Context) ClusterIAMPolicyPtrOutput

type ClusterIAMPolicyArgs

type ClusterIAMPolicyArgs struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringInput
	// The policy data generated by an `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a ClusterIAMPolicy resource.

func (ClusterIAMPolicyArgs) ElementType

func (ClusterIAMPolicyArgs) ElementType() reflect.Type

type ClusterIAMPolicyArray

type ClusterIAMPolicyArray []ClusterIAMPolicyInput

func (ClusterIAMPolicyArray) ElementType

func (ClusterIAMPolicyArray) ElementType() reflect.Type

func (ClusterIAMPolicyArray) ToClusterIAMPolicyArrayOutput

func (i ClusterIAMPolicyArray) ToClusterIAMPolicyArrayOutput() ClusterIAMPolicyArrayOutput

func (ClusterIAMPolicyArray) ToClusterIAMPolicyArrayOutputWithContext

func (i ClusterIAMPolicyArray) ToClusterIAMPolicyArrayOutputWithContext(ctx context.Context) ClusterIAMPolicyArrayOutput

type ClusterIAMPolicyArrayInput

type ClusterIAMPolicyArrayInput interface {
	pulumi.Input

	ToClusterIAMPolicyArrayOutput() ClusterIAMPolicyArrayOutput
	ToClusterIAMPolicyArrayOutputWithContext(context.Context) ClusterIAMPolicyArrayOutput
}

ClusterIAMPolicyArrayInput is an input type that accepts ClusterIAMPolicyArray and ClusterIAMPolicyArrayOutput values. You can construct a concrete instance of `ClusterIAMPolicyArrayInput` via:

ClusterIAMPolicyArray{ ClusterIAMPolicyArgs{...} }

type ClusterIAMPolicyArrayOutput

type ClusterIAMPolicyArrayOutput struct{ *pulumi.OutputState }

func (ClusterIAMPolicyArrayOutput) ElementType

func (ClusterIAMPolicyArrayOutput) Index

func (ClusterIAMPolicyArrayOutput) ToClusterIAMPolicyArrayOutput

func (o ClusterIAMPolicyArrayOutput) ToClusterIAMPolicyArrayOutput() ClusterIAMPolicyArrayOutput

func (ClusterIAMPolicyArrayOutput) ToClusterIAMPolicyArrayOutputWithContext

func (o ClusterIAMPolicyArrayOutput) ToClusterIAMPolicyArrayOutputWithContext(ctx context.Context) ClusterIAMPolicyArrayOutput

type ClusterIAMPolicyInput

type ClusterIAMPolicyInput interface {
	pulumi.Input

	ToClusterIAMPolicyOutput() ClusterIAMPolicyOutput
	ToClusterIAMPolicyOutputWithContext(ctx context.Context) ClusterIAMPolicyOutput
}

type ClusterIAMPolicyMap

type ClusterIAMPolicyMap map[string]ClusterIAMPolicyInput

func (ClusterIAMPolicyMap) ElementType

func (ClusterIAMPolicyMap) ElementType() reflect.Type

func (ClusterIAMPolicyMap) ToClusterIAMPolicyMapOutput

func (i ClusterIAMPolicyMap) ToClusterIAMPolicyMapOutput() ClusterIAMPolicyMapOutput

func (ClusterIAMPolicyMap) ToClusterIAMPolicyMapOutputWithContext

func (i ClusterIAMPolicyMap) ToClusterIAMPolicyMapOutputWithContext(ctx context.Context) ClusterIAMPolicyMapOutput

type ClusterIAMPolicyMapInput

type ClusterIAMPolicyMapInput interface {
	pulumi.Input

	ToClusterIAMPolicyMapOutput() ClusterIAMPolicyMapOutput
	ToClusterIAMPolicyMapOutputWithContext(context.Context) ClusterIAMPolicyMapOutput
}

ClusterIAMPolicyMapInput is an input type that accepts ClusterIAMPolicyMap and ClusterIAMPolicyMapOutput values. You can construct a concrete instance of `ClusterIAMPolicyMapInput` via:

ClusterIAMPolicyMap{ "key": ClusterIAMPolicyArgs{...} }

type ClusterIAMPolicyMapOutput

type ClusterIAMPolicyMapOutput struct{ *pulumi.OutputState }

func (ClusterIAMPolicyMapOutput) ElementType

func (ClusterIAMPolicyMapOutput) ElementType() reflect.Type

func (ClusterIAMPolicyMapOutput) MapIndex

func (ClusterIAMPolicyMapOutput) ToClusterIAMPolicyMapOutput

func (o ClusterIAMPolicyMapOutput) ToClusterIAMPolicyMapOutput() ClusterIAMPolicyMapOutput

func (ClusterIAMPolicyMapOutput) ToClusterIAMPolicyMapOutputWithContext

func (o ClusterIAMPolicyMapOutput) ToClusterIAMPolicyMapOutputWithContext(ctx context.Context) ClusterIAMPolicyMapOutput

type ClusterIAMPolicyOutput

type ClusterIAMPolicyOutput struct{ *pulumi.OutputState }

func (ClusterIAMPolicyOutput) ElementType

func (ClusterIAMPolicyOutput) ElementType() reflect.Type

func (ClusterIAMPolicyOutput) ToClusterIAMPolicyOutput

func (o ClusterIAMPolicyOutput) ToClusterIAMPolicyOutput() ClusterIAMPolicyOutput

func (ClusterIAMPolicyOutput) ToClusterIAMPolicyOutputWithContext

func (o ClusterIAMPolicyOutput) ToClusterIAMPolicyOutputWithContext(ctx context.Context) ClusterIAMPolicyOutput

func (ClusterIAMPolicyOutput) ToClusterIAMPolicyPtrOutput

func (o ClusterIAMPolicyOutput) ToClusterIAMPolicyPtrOutput() ClusterIAMPolicyPtrOutput

func (ClusterIAMPolicyOutput) ToClusterIAMPolicyPtrOutputWithContext

func (o ClusterIAMPolicyOutput) ToClusterIAMPolicyPtrOutputWithContext(ctx context.Context) ClusterIAMPolicyPtrOutput

type ClusterIAMPolicyPtrInput

type ClusterIAMPolicyPtrInput interface {
	pulumi.Input

	ToClusterIAMPolicyPtrOutput() ClusterIAMPolicyPtrOutput
	ToClusterIAMPolicyPtrOutputWithContext(ctx context.Context) ClusterIAMPolicyPtrOutput
}

type ClusterIAMPolicyPtrOutput

type ClusterIAMPolicyPtrOutput struct{ *pulumi.OutputState }

func (ClusterIAMPolicyPtrOutput) Elem added in v5.21.0

func (ClusterIAMPolicyPtrOutput) ElementType

func (ClusterIAMPolicyPtrOutput) ElementType() reflect.Type

func (ClusterIAMPolicyPtrOutput) ToClusterIAMPolicyPtrOutput

func (o ClusterIAMPolicyPtrOutput) ToClusterIAMPolicyPtrOutput() ClusterIAMPolicyPtrOutput

func (ClusterIAMPolicyPtrOutput) ToClusterIAMPolicyPtrOutputWithContext

func (o ClusterIAMPolicyPtrOutput) ToClusterIAMPolicyPtrOutputWithContext(ctx context.Context) ClusterIAMPolicyPtrOutput

type ClusterIAMPolicyState

type ClusterIAMPolicyState struct {
	// The name or relative resource id of the cluster to manage IAM policies for.
	Cluster pulumi.StringPtrInput
	// (Computed) The etag of the cluster's IAM policy.
	Etag pulumi.StringPtrInput
	// The policy data generated by an `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringPtrInput
	// The project in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the cluster belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

func (ClusterIAMPolicyState) ElementType

func (ClusterIAMPolicyState) ElementType() reflect.Type

type ClusterInput

type ClusterInput interface {
	pulumi.Input

	ToClusterOutput() ClusterOutput
	ToClusterOutputWithContext(ctx context.Context) ClusterOutput
}

type ClusterMap

type ClusterMap map[string]ClusterInput

func (ClusterMap) ElementType

func (ClusterMap) ElementType() reflect.Type

func (ClusterMap) ToClusterMapOutput

func (i ClusterMap) ToClusterMapOutput() ClusterMapOutput

func (ClusterMap) ToClusterMapOutputWithContext

func (i ClusterMap) ToClusterMapOutputWithContext(ctx context.Context) ClusterMapOutput

type ClusterMapInput

type ClusterMapInput interface {
	pulumi.Input

	ToClusterMapOutput() ClusterMapOutput
	ToClusterMapOutputWithContext(context.Context) ClusterMapOutput
}

ClusterMapInput is an input type that accepts ClusterMap and ClusterMapOutput values. You can construct a concrete instance of `ClusterMapInput` via:

ClusterMap{ "key": ClusterArgs{...} }

type ClusterMapOutput

type ClusterMapOutput struct{ *pulumi.OutputState }

func (ClusterMapOutput) ElementType

func (ClusterMapOutput) ElementType() reflect.Type

func (ClusterMapOutput) MapIndex

func (ClusterMapOutput) ToClusterMapOutput

func (o ClusterMapOutput) ToClusterMapOutput() ClusterMapOutput

func (ClusterMapOutput) ToClusterMapOutputWithContext

func (o ClusterMapOutput) ToClusterMapOutputWithContext(ctx context.Context) ClusterMapOutput

type ClusterOutput

type ClusterOutput struct{ *pulumi.OutputState }

func (ClusterOutput) ElementType

func (ClusterOutput) ElementType() reflect.Type

func (ClusterOutput) ToClusterOutput

func (o ClusterOutput) ToClusterOutput() ClusterOutput

func (ClusterOutput) ToClusterOutputWithContext

func (o ClusterOutput) ToClusterOutputWithContext(ctx context.Context) ClusterOutput

func (ClusterOutput) ToClusterPtrOutput

func (o ClusterOutput) ToClusterPtrOutput() ClusterPtrOutput

func (ClusterOutput) ToClusterPtrOutputWithContext

func (o ClusterOutput) ToClusterPtrOutputWithContext(ctx context.Context) ClusterPtrOutput

type ClusterPtrInput

type ClusterPtrInput interface {
	pulumi.Input

	ToClusterPtrOutput() ClusterPtrOutput
	ToClusterPtrOutputWithContext(ctx context.Context) ClusterPtrOutput
}

type ClusterPtrOutput

type ClusterPtrOutput struct{ *pulumi.OutputState }

func (ClusterPtrOutput) Elem added in v5.21.0

func (ClusterPtrOutput) ElementType

func (ClusterPtrOutput) ElementType() reflect.Type

func (ClusterPtrOutput) ToClusterPtrOutput

func (o ClusterPtrOutput) ToClusterPtrOutput() ClusterPtrOutput

func (ClusterPtrOutput) ToClusterPtrOutputWithContext

func (o ClusterPtrOutput) ToClusterPtrOutputWithContext(ctx context.Context) ClusterPtrOutput

type ClusterState

type ClusterState struct {
	// Allows you to configure various aspects of the cluster.
	// Structure defined below.
	ClusterConfig ClusterClusterConfigPtrInput
	// The timeout duration which allows graceful decommissioning when you change the
	// number of worker nodes directly through a `pulumi up`.
	GracefulDecommissionTimeout pulumi.StringPtrInput
	// The list of labels (key/value pairs) to be applied to
	// instances in the cluster. GCP generates some itself including `goog-dataproc-cluster-name`
	// which is the name of the cluster.
	Labels pulumi.StringMapInput
	// The name of the cluster, unique within the project and
	// zone.
	Name pulumi.StringPtrInput
	// The ID of the project in which the `cluster` will exist. If it
	// is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The region in which the cluster and associated nodes will be created.
	// Defaults to `global`.
	Region pulumi.StringPtrInput
}
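
These state fields mirror the corresponding inputs on `ClusterArgs`. A minimal sketch with placeholder values, setting the decommission timeout at creation time so that later reductions in worker count drain nodes gracefully:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Placeholder names and values.
		_, err := dataproc.NewCluster(ctx, "example", &dataproc.ClusterArgs{
			Region:                      pulumi.String("us-central1"),
			GracefulDecommissionTimeout: pulumi.String("120s"),
			Labels: pulumi.StringMap{
				"env": pulumi.String("dev"),
			},
		})
		return err
	})
}

```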

func (ClusterState) ElementType

func (ClusterState) ElementType() reflect.Type

type Job

type Job struct {
	pulumi.CustomResourceState

	// If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
	DriverControlsFilesUri pulumi.StringOutput `pulumi:"driverControlsFilesUri"`
	// A URI pointing to the location of the stdout of the job's driver program.
	DriverOutputResourceUri pulumi.StringOutput `pulumi:"driverOutputResourceUri"`
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete pulumi.BoolPtrOutput `pulumi:"forceDelete"`
	// The config of Hadoop job
	HadoopConfig JobHadoopConfigPtrOutput `pulumi:"hadoopConfig"`
	// The config of hive job
	HiveConfig JobHiveConfigPtrOutput `pulumi:"hiveConfig"`
	// The list of labels (key/value pairs) to add to the job.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The config of pig job.
	PigConfig JobPigConfigPtrOutput `pulumi:"pigConfig"`
	// The config of job placement.
	Placement JobPlacementOutput `pulumi:"placement"`
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The config of pySpark job.
	PysparkConfig JobPysparkConfigPtrOutput `pulumi:"pysparkConfig"`
	// The reference of the job
	Reference JobReferenceOutput `pulumi:"reference"`
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrOutput `pulumi:"region"`
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingPtrOutput `pulumi:"scheduling"`
	// The config of the Spark job.
	SparkConfig JobSparkConfigPtrOutput `pulumi:"sparkConfig"`
	// The config of SparkSql job
	SparksqlConfig JobSparksqlConfigPtrOutput `pulumi:"sparksqlConfig"`
	// The status of the job.
	Statuses JobStatusArrayOutput `pulumi:"statuses"`
}

Manages a job resource within a Dataproc cluster within GCE. For more information see [the official dataproc documentation](https://cloud.google.com/dataproc/).

!> **Note:** This resource does not support 'update' and changing any attributes will cause the resource to be recreated.

## Example Usage

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		mycluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		spark, err := dataproc.NewJob(ctx, "spark", &dataproc.JobArgs{
			Region:      mycluster.Region,
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: mycluster.Name,
			},
			SparkConfig: &dataproc.JobSparkConfigArgs{
				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
				Args: pulumi.StringArray{
					pulumi.String("1000"),
				},
				Properties: pulumi.StringMap{
					"spark.logConf": pulumi.String("true"),
				},
				LoggingConfig: &dataproc.JobSparkConfigLoggingConfigArgs{
					DriverLogLevels: pulumi.StringMap{
						"root": pulumi.String("INFO"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		pyspark, err := dataproc.NewJob(ctx, "pyspark", &dataproc.JobArgs{
			Region:      mycluster.Region,
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: mycluster.Name,
			},
			PysparkConfig: &dataproc.JobPysparkConfigArgs{
				MainPythonFileUri: pulumi.String("gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py"),
				Properties: pulumi.StringMap{
					"spark.logConf": pulumi.String("true"),
				},
			},
		})
		if err != nil {
			return err
		}
		ctx.Export("sparkStatus", spark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (string, error) {
			return statuses[0].State, nil
		}).(pulumi.StringOutput))
		ctx.Export("pysparkStatus", pyspark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (string, error) {
			return statuses[0].State, nil
		}).(pulumi.StringOutput))
		return nil
	})
}

```

## Import

This resource does not support import.

func GetJob

func GetJob(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobState, opts ...pulumi.ResourceOption) (*Job, error)

GetJob gets an existing Job resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
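
For illustration, a minimal sketch of looking up an existing Job already managed in this stack's state; the resource name "existing" and the ID value are hypothetical placeholders, and nil is passed for the optional state:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// "your-dataproc-job-id" is a placeholder for the ID of a Job already in state.
		existing, err := dataproc.GetJob(ctx, "existing", pulumi.ID("your-dataproc-job-id"), nil)
		if err != nil {
			return err
		}
		// Re-export a couple of the looked-up job's output properties.
		ctx.Export("existingJobProject", existing.Project)
		ctx.Export("existingJobDriverOutput", existing.DriverOutputResourceUri)
		return nil
	})
}

```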

func NewJob

func NewJob(ctx *pulumi.Context,
	name string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error)

NewJob registers a new resource with the given unique name, arguments, and options.

func (*Job) ElementType

func (*Job) ElementType() reflect.Type

func (*Job) ToJobOutput

func (i *Job) ToJobOutput() JobOutput

func (*Job) ToJobOutputWithContext

func (i *Job) ToJobOutputWithContext(ctx context.Context) JobOutput

func (*Job) ToJobPtrOutput

func (i *Job) ToJobPtrOutput() JobPtrOutput

func (*Job) ToJobPtrOutputWithContext

func (i *Job) ToJobPtrOutputWithContext(ctx context.Context) JobPtrOutput

type JobArgs

type JobArgs struct {
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete pulumi.BoolPtrInput
	// The config of Hadoop job
	HadoopConfig JobHadoopConfigPtrInput
	// The config of hive job
	HiveConfig JobHiveConfigPtrInput
	// The list of labels (key/value pairs) to add to the job.
	Labels pulumi.StringMapInput
	// The config of pig job.
	PigConfig JobPigConfigPtrInput
	// The config of job placement.
	Placement JobPlacementInput
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The config of pySpark job.
	PysparkConfig JobPysparkConfigPtrInput
	// The reference of the job
	Reference JobReferencePtrInput
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrInput
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingPtrInput
	// The config of the Spark job.
	SparkConfig JobSparkConfigPtrInput
	// The config of SparkSql job
	SparksqlConfig JobSparksqlConfigPtrInput
}

The set of arguments for constructing a Job resource.

func (JobArgs) ElementType

func (JobArgs) ElementType() reflect.Type

type JobArray

type JobArray []JobInput

func (JobArray) ElementType

func (JobArray) ElementType() reflect.Type

func (JobArray) ToJobArrayOutput

func (i JobArray) ToJobArrayOutput() JobArrayOutput

func (JobArray) ToJobArrayOutputWithContext

func (i JobArray) ToJobArrayOutputWithContext(ctx context.Context) JobArrayOutput

type JobArrayInput

type JobArrayInput interface {
	pulumi.Input

	ToJobArrayOutput() JobArrayOutput
	ToJobArrayOutputWithContext(context.Context) JobArrayOutput
}

JobArrayInput is an input type that accepts JobArray and JobArrayOutput values. You can construct a concrete instance of `JobArrayInput` via:

JobArray{ JobArgs{...} }

type JobArrayOutput

type JobArrayOutput struct{ *pulumi.OutputState }

func (JobArrayOutput) ElementType

func (JobArrayOutput) ElementType() reflect.Type

func (JobArrayOutput) Index

func (JobArrayOutput) ToJobArrayOutput

func (o JobArrayOutput) ToJobArrayOutput() JobArrayOutput

func (JobArrayOutput) ToJobArrayOutputWithContext

func (o JobArrayOutput) ToJobArrayOutputWithContext(ctx context.Context) JobArrayOutput

type JobHadoopConfig

type JobHadoopConfig struct {
	// HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                      `pulumi:"jarFileUris"`
	LoggingConfig *JobHadoopConfigLoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
}

type JobHadoopConfigArgs

type JobHadoopConfigArgs struct {
	// HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput              `pulumi:"jarFileUris"`
	LoggingConfig JobHadoopConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

func (JobHadoopConfigArgs) ElementType

func (JobHadoopConfigArgs) ElementType() reflect.Type

func (JobHadoopConfigArgs) ToJobHadoopConfigOutput

func (i JobHadoopConfigArgs) ToJobHadoopConfigOutput() JobHadoopConfigOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigOutputWithContext

func (i JobHadoopConfigArgs) ToJobHadoopConfigOutputWithContext(ctx context.Context) JobHadoopConfigOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigPtrOutput

func (i JobHadoopConfigArgs) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigArgs) ToJobHadoopConfigPtrOutputWithContext

func (i JobHadoopConfigArgs) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHadoopConfigInput

type JobHadoopConfigInput interface {
	pulumi.Input

	ToJobHadoopConfigOutput() JobHadoopConfigOutput
	ToJobHadoopConfigOutputWithContext(context.Context) JobHadoopConfigOutput
}

JobHadoopConfigInput is an input type that accepts JobHadoopConfigArgs and JobHadoopConfigOutput values. You can construct a concrete instance of `JobHadoopConfigInput` via:

JobHadoopConfigArgs{...}
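
As a hedged sketch of this pattern, the following passes a `JobHadoopConfigArgs` value as the `HadoopConfig` input of a Job, assuming a cluster like the `mycluster` example above. The main jar URI is the examples jar cited in the field documentation; the wordcount arguments and the output bucket path are illustrative placeholders:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		mycluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		// JobHadoopConfigArgs implements JobHadoopConfigPtrInput, so it can be
		// assigned directly to JobArgs.HadoopConfig.
		_, err = dataproc.NewJob(ctx, "hadoop", &dataproc.JobArgs{
			Region:      mycluster.Region,
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: mycluster.Name,
			},
			HadoopConfig: &dataproc.JobHadoopConfigArgs{
				MainJarFileUri: pulumi.String("file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"),
				Args: pulumi.StringArray{
					pulumi.String("wordcount"),
					pulumi.String("file:///usr/lib/spark/NOTICE"),
					// Placeholder output path; substitute a bucket you own.
					pulumi.String("gs://your-bucket/hadoopjob/output"),
				},
				LoggingConfig: &dataproc.JobHadoopConfigLoggingConfigArgs{
					DriverLogLevels: pulumi.StringMap{
						"root": pulumi.String("INFO"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```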

type JobHadoopConfigLoggingConfig

type JobHadoopConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobHadoopConfigLoggingConfigArgs

type JobHadoopConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobHadoopConfigLoggingConfigArgs) ElementType

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutput

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutputWithContext

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutput

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (i JobHadoopConfigLoggingConfigArgs) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigInput

type JobHadoopConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput
	ToJobHadoopConfigLoggingConfigOutputWithContext(context.Context) JobHadoopConfigLoggingConfigOutput
}

JobHadoopConfigLoggingConfigInput is an input type that accepts JobHadoopConfigLoggingConfigArgs and JobHadoopConfigLoggingConfigOutput values. You can construct a concrete instance of `JobHadoopConfigLoggingConfigInput` via:

JobHadoopConfigLoggingConfigArgs{...}

type JobHadoopConfigLoggingConfigOutput

type JobHadoopConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigLoggingConfigOutput) DriverLogLevels

func (JobHadoopConfigLoggingConfigOutput) ElementType

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutput

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutput() JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutputWithContext

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutput

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (o JobHadoopConfigLoggingConfigOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigPtrInput

type JobHadoopConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput
	ToJobHadoopConfigLoggingConfigPtrOutputWithContext(context.Context) JobHadoopConfigLoggingConfigPtrOutput
}

JobHadoopConfigLoggingConfigPtrInput is an input type that accepts JobHadoopConfigLoggingConfigArgs, JobHadoopConfigLoggingConfigPtr and JobHadoopConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobHadoopConfigLoggingConfigPtrInput` via:

        JobHadoopConfigLoggingConfigArgs{...}

or:

        nil

type JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobHadoopConfigLoggingConfigPtrOutput) Elem

func (JobHadoopConfigLoggingConfigPtrOutput) ElementType

func (JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutput

func (o JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutput() JobHadoopConfigLoggingConfigPtrOutput

func (JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext

func (o JobHadoopConfigLoggingConfigPtrOutput) ToJobHadoopConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigLoggingConfigPtrOutput

type JobHadoopConfigOutput

type JobHadoopConfigOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobHadoopConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobHadoopConfigOutput) ElementType

func (JobHadoopConfigOutput) ElementType() reflect.Type

func (JobHadoopConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobHadoopConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobHadoopConfigOutput) LoggingConfig

func (JobHadoopConfigOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`

func (JobHadoopConfigOutput) MainJarFileUri

func (o JobHadoopConfigOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobHadoopConfigOutput) Properties

A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHadoopConfigOutput) ToJobHadoopConfigOutput

func (o JobHadoopConfigOutput) ToJobHadoopConfigOutput() JobHadoopConfigOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigOutputWithContext

func (o JobHadoopConfigOutput) ToJobHadoopConfigOutputWithContext(ctx context.Context) JobHadoopConfigOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigPtrOutput

func (o JobHadoopConfigOutput) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigOutput) ToJobHadoopConfigPtrOutputWithContext

func (o JobHadoopConfigOutput) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHadoopConfigPtrInput

type JobHadoopConfigPtrInput interface {
	pulumi.Input

	ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput
	ToJobHadoopConfigPtrOutputWithContext(context.Context) JobHadoopConfigPtrOutput
}

JobHadoopConfigPtrInput is an input type that accepts JobHadoopConfigArgs, JobHadoopConfigPtr and JobHadoopConfigPtrOutput values. You can construct a concrete instance of `JobHadoopConfigPtrInput` via:

        JobHadoopConfigArgs{...}

or:

        nil

type JobHadoopConfigPtrOutput

type JobHadoopConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHadoopConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobHadoopConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobHadoopConfigPtrOutput) Elem

func (JobHadoopConfigPtrOutput) ElementType

func (JobHadoopConfigPtrOutput) ElementType() reflect.Type

func (JobHadoopConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobHadoopConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobHadoopConfigPtrOutput) LoggingConfig

func (JobHadoopConfigPtrOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`

func (JobHadoopConfigPtrOutput) MainJarFileUri

func (o JobHadoopConfigPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobHadoopConfigPtrOutput) Properties

A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutput

func (o JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutput() JobHadoopConfigPtrOutput

func (JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutputWithContext

func (o JobHadoopConfigPtrOutput) ToJobHadoopConfigPtrOutputWithContext(ctx context.Context) JobHadoopConfigPtrOutput

type JobHiveConfig

type JobHiveConfig struct {
	// Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// A mapping of property names to values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryList`
	QueryFileUri *string `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`
	QueryLists []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

type JobHiveConfigArgs

type JobHiveConfigArgs struct {
	// Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// A mapping of property names to values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryList`
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`
	QueryLists pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

func (JobHiveConfigArgs) ElementType

func (JobHiveConfigArgs) ElementType() reflect.Type

func (JobHiveConfigArgs) ToJobHiveConfigOutput

func (i JobHiveConfigArgs) ToJobHiveConfigOutput() JobHiveConfigOutput

func (JobHiveConfigArgs) ToJobHiveConfigOutputWithContext

func (i JobHiveConfigArgs) ToJobHiveConfigOutputWithContext(ctx context.Context) JobHiveConfigOutput

func (JobHiveConfigArgs) ToJobHiveConfigPtrOutput

func (i JobHiveConfigArgs) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigArgs) ToJobHiveConfigPtrOutputWithContext

func (i JobHiveConfigArgs) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobHiveConfigInput

type JobHiveConfigInput interface {
	pulumi.Input

	ToJobHiveConfigOutput() JobHiveConfigOutput
	ToJobHiveConfigOutputWithContext(context.Context) JobHiveConfigOutput
}

JobHiveConfigInput is an input type that accepts JobHiveConfigArgs and JobHiveConfigOutput values. You can construct a concrete instance of `JobHiveConfigInput` via:

JobHiveConfigArgs{...}
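
A hedged sketch of a Hive job built from a `JobHiveConfigArgs` value constructed as above; the cluster, queries, and script variable are illustrative placeholders:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		mycluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		_, err = dataproc.NewJob(ctx, "hive", &dataproc.JobArgs{
			Region:      mycluster.Region,
			ForceDelete: pulumi.Bool(true),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: mycluster.Name,
			},
			HiveConfig: &dataproc.JobHiveConfigArgs{
				// QueryLists conflicts with QueryFileUri; set only one of the two.
				QueryLists: pulumi.StringArray{
					pulumi.String("SHOW DATABASES"),
					pulumi.String("SHOW TABLES"),
				},
				// Equivalent to running `SET db="default";` before the queries.
				ScriptVariables: pulumi.StringMap{
					"db": pulumi.String("default"),
				},
				ContinueOnFailure: pulumi.Bool(false),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```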

type JobHiveConfigOutput

type JobHiveConfigOutput struct{ *pulumi.OutputState }

func (JobHiveConfigOutput) ContinueOnFailure

func (o JobHiveConfigOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (JobHiveConfigOutput) ElementType

func (JobHiveConfigOutput) ElementType() reflect.Type

func (JobHiveConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobHiveConfigOutput) Properties

A mapping of property names to values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHiveConfigOutput) QueryFileUri

func (o JobHiveConfigOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryList`

func (JobHiveConfigOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobHiveConfigOutput) ScriptVariables

func (o JobHiveConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobHiveConfigOutput) ToJobHiveConfigOutput

func (o JobHiveConfigOutput) ToJobHiveConfigOutput() JobHiveConfigOutput

func (JobHiveConfigOutput) ToJobHiveConfigOutputWithContext

func (o JobHiveConfigOutput) ToJobHiveConfigOutputWithContext(ctx context.Context) JobHiveConfigOutput

func (JobHiveConfigOutput) ToJobHiveConfigPtrOutput

func (o JobHiveConfigOutput) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigOutput) ToJobHiveConfigPtrOutputWithContext

func (o JobHiveConfigOutput) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobHiveConfigPtrInput

type JobHiveConfigPtrInput interface {
	pulumi.Input

	ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput
	ToJobHiveConfigPtrOutputWithContext(context.Context) JobHiveConfigPtrOutput
}

JobHiveConfigPtrInput is an input type that accepts JobHiveConfigArgs, JobHiveConfigPtr and JobHiveConfigPtrOutput values. You can construct a concrete instance of `JobHiveConfigPtrInput` via:

        JobHiveConfigArgs{...}

or:

        nil

type JobHiveConfigPtrOutput

type JobHiveConfigPtrOutput struct{ *pulumi.OutputState }

func (JobHiveConfigPtrOutput) ContinueOnFailure

func (o JobHiveConfigPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.

func (JobHiveConfigPtrOutput) Elem

func (JobHiveConfigPtrOutput) ElementType

func (JobHiveConfigPtrOutput) ElementType() reflect.Type

func (JobHiveConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobHiveConfigPtrOutput) Properties

A mapping of property names to values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobHiveConfigPtrOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryList`

func (JobHiveConfigPtrOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobHiveConfigPtrOutput) ScriptVariables

func (o JobHiveConfigPtrOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutput

func (o JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutput() JobHiveConfigPtrOutput

func (JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutputWithContext

func (o JobHiveConfigPtrOutput) ToJobHiveConfigPtrOutputWithContext(ctx context.Context) JobHiveConfigPtrOutput

type JobIAMBinding

type JobIAMBinding struct {
	pulumi.CustomResourceState

	Condition JobIAMBindingConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the job's IAM policy.
	Etag    pulumi.StringOutput      `pulumi:"etag"`
	JobId   pulumi.StringOutput      `pulumi:"jobId"`
	Members pulumi.StringArrayOutput `pulumi:"members"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_job\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewJobIAMPolicy(ctx, "editor", &dataproc.JobIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			JobId:      pulumi.String("your-dataproc-job"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMBinding(ctx, "editor", &dataproc.JobIAMBindingArgs{
			JobId: pulumi.String("your-dataproc-job"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMMember(ctx, "editor", &dataproc.JobIAMMemberArgs{
			JobId:  pulumi.String("your-dataproc-job"),
			Member: pulumi.String("user:jane@example.com"),
			Role:   pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Job IAM resources can be imported using the project, region, job id, role and/or member.

```sh

$ pulumi import gcp:dataproc/jobIAMBinding:JobIAMBinding editor "projects/{project}/regions/{region}/jobs/{job_id}"

```

```sh

$ pulumi import gcp:dataproc/jobIAMBinding:JobIAMBinding editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/jobIAMBinding:JobIAMBinding editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor user:jane@example.com"

```

-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
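
For example, a hedged sketch of importing a binding that grants a custom role (project, role, and job identifiers are placeholders):

```sh

$ pulumi import gcp:dataproc/jobIAMBinding:JobIAMBinding editor "projects/{project}/regions/{region}/jobs/{job_id} projects/my-project/roles/my-custom-role"

```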

func GetJobIAMBinding

func GetJobIAMBinding(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMBindingState, opts ...pulumi.ResourceOption) (*JobIAMBinding, error)

GetJobIAMBinding gets an existing JobIAMBinding resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMBinding

func NewJobIAMBinding(ctx *pulumi.Context,
	name string, args *JobIAMBindingArgs, opts ...pulumi.ResourceOption) (*JobIAMBinding, error)

NewJobIAMBinding registers a new resource with the given unique name, arguments, and options.

func (*JobIAMBinding) ElementType

func (*JobIAMBinding) ElementType() reflect.Type

func (*JobIAMBinding) ToJobIAMBindingOutput

func (i *JobIAMBinding) ToJobIAMBindingOutput() JobIAMBindingOutput

func (*JobIAMBinding) ToJobIAMBindingOutputWithContext

func (i *JobIAMBinding) ToJobIAMBindingOutputWithContext(ctx context.Context) JobIAMBindingOutput

func (*JobIAMBinding) ToJobIAMBindingPtrOutput

func (i *JobIAMBinding) ToJobIAMBindingPtrOutput() JobIAMBindingPtrOutput

func (*JobIAMBinding) ToJobIAMBindingPtrOutputWithContext

func (i *JobIAMBinding) ToJobIAMBindingPtrOutputWithContext(ctx context.Context) JobIAMBindingPtrOutput

type JobIAMBindingArgs

type JobIAMBindingArgs struct {
	Condition JobIAMBindingConditionPtrInput
	JobId     pulumi.StringInput
	Members   pulumi.StringArrayInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a JobIAMBinding resource.

func (JobIAMBindingArgs) ElementType

func (JobIAMBindingArgs) ElementType() reflect.Type

type JobIAMBindingArray

type JobIAMBindingArray []JobIAMBindingInput

func (JobIAMBindingArray) ElementType

func (JobIAMBindingArray) ElementType() reflect.Type

func (JobIAMBindingArray) ToJobIAMBindingArrayOutput

func (i JobIAMBindingArray) ToJobIAMBindingArrayOutput() JobIAMBindingArrayOutput

func (JobIAMBindingArray) ToJobIAMBindingArrayOutputWithContext

func (i JobIAMBindingArray) ToJobIAMBindingArrayOutputWithContext(ctx context.Context) JobIAMBindingArrayOutput

type JobIAMBindingArrayInput

type JobIAMBindingArrayInput interface {
	pulumi.Input

	ToJobIAMBindingArrayOutput() JobIAMBindingArrayOutput
	ToJobIAMBindingArrayOutputWithContext(context.Context) JobIAMBindingArrayOutput
}

JobIAMBindingArrayInput is an input type that accepts JobIAMBindingArray and JobIAMBindingArrayOutput values. You can construct a concrete instance of `JobIAMBindingArrayInput` via:

JobIAMBindingArray{ JobIAMBindingArgs{...} }

type JobIAMBindingArrayOutput

type JobIAMBindingArrayOutput struct{ *pulumi.OutputState }

func (JobIAMBindingArrayOutput) ElementType

func (JobIAMBindingArrayOutput) ElementType() reflect.Type

func (JobIAMBindingArrayOutput) Index

func (JobIAMBindingArrayOutput) ToJobIAMBindingArrayOutput

func (o JobIAMBindingArrayOutput) ToJobIAMBindingArrayOutput() JobIAMBindingArrayOutput

func (JobIAMBindingArrayOutput) ToJobIAMBindingArrayOutputWithContext

func (o JobIAMBindingArrayOutput) ToJobIAMBindingArrayOutputWithContext(ctx context.Context) JobIAMBindingArrayOutput

type JobIAMBindingCondition

type JobIAMBindingCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type JobIAMBindingConditionArgs

type JobIAMBindingConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (JobIAMBindingConditionArgs) ElementType

func (JobIAMBindingConditionArgs) ElementType() reflect.Type

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutput

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutputWithContext

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionOutputWithContext(ctx context.Context) JobIAMBindingConditionOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutput

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutputWithContext

func (i JobIAMBindingConditionArgs) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionInput

type JobIAMBindingConditionInput interface {
	pulumi.Input

	ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput
	ToJobIAMBindingConditionOutputWithContext(context.Context) JobIAMBindingConditionOutput
}

JobIAMBindingConditionInput is an input type that accepts JobIAMBindingConditionArgs and JobIAMBindingConditionOutput values. You can construct a concrete instance of `JobIAMBindingConditionInput` via:

JobIAMBindingConditionArgs{...}
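
As a hedged sketch, a `JobIAMBindingConditionArgs` value can be supplied as the optional `Condition` of a `JobIAMBindingArgs`, assuming the service accepts IAM conditions on job policies; the job ID, title, and CEL expression below are illustrative only:

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMBinding(ctx, "editor", &dataproc.JobIAMBindingArgs{
			JobId: pulumi.String("your-dataproc-job"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
			// An illustrative time-bound condition; the expression uses CEL syntax.
			Condition: &dataproc.JobIAMBindingConditionArgs{
				Title:       pulumi.String("expires-end-of-2022"),
				Description: pulumi.String("Grant editor only until the end of 2022."),
				Expression:  pulumi.String("request.time < timestamp(\"2023-01-01T00:00:00Z\")"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```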

type JobIAMBindingConditionOutput

type JobIAMBindingConditionOutput struct{ *pulumi.OutputState }

func (JobIAMBindingConditionOutput) Description

func (JobIAMBindingConditionOutput) ElementType

func (JobIAMBindingConditionOutput) Expression

func (JobIAMBindingConditionOutput) Title

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutput

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutput() JobIAMBindingConditionOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutputWithContext

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionOutputWithContext(ctx context.Context) JobIAMBindingConditionOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutput

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutputWithContext

func (o JobIAMBindingConditionOutput) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionPtrInput

type JobIAMBindingConditionPtrInput interface {
	pulumi.Input

	ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput
	ToJobIAMBindingConditionPtrOutputWithContext(context.Context) JobIAMBindingConditionPtrOutput
}

JobIAMBindingConditionPtrInput is an input type that accepts JobIAMBindingConditionArgs, JobIAMBindingConditionPtr and JobIAMBindingConditionPtrOutput values. You can construct a concrete instance of `JobIAMBindingConditionPtrInput` via:

        JobIAMBindingConditionArgs{...}

or:

        nil

type JobIAMBindingConditionPtrOutput

type JobIAMBindingConditionPtrOutput struct{ *pulumi.OutputState }

func (JobIAMBindingConditionPtrOutput) Description

func (JobIAMBindingConditionPtrOutput) Elem

func (JobIAMBindingConditionPtrOutput) ElementType

func (JobIAMBindingConditionPtrOutput) Expression

func (JobIAMBindingConditionPtrOutput) Title

func (JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutput

func (o JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutput() JobIAMBindingConditionPtrOutput

func (JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutputWithContext

func (o JobIAMBindingConditionPtrOutput) ToJobIAMBindingConditionPtrOutputWithContext(ctx context.Context) JobIAMBindingConditionPtrOutput

type JobIAMBindingInput

type JobIAMBindingInput interface {
	pulumi.Input

	ToJobIAMBindingOutput() JobIAMBindingOutput
	ToJobIAMBindingOutputWithContext(ctx context.Context) JobIAMBindingOutput
}

type JobIAMBindingMap

type JobIAMBindingMap map[string]JobIAMBindingInput

func (JobIAMBindingMap) ElementType

func (JobIAMBindingMap) ElementType() reflect.Type

func (JobIAMBindingMap) ToJobIAMBindingMapOutput

func (i JobIAMBindingMap) ToJobIAMBindingMapOutput() JobIAMBindingMapOutput

func (JobIAMBindingMap) ToJobIAMBindingMapOutputWithContext

func (i JobIAMBindingMap) ToJobIAMBindingMapOutputWithContext(ctx context.Context) JobIAMBindingMapOutput

type JobIAMBindingMapInput

type JobIAMBindingMapInput interface {
	pulumi.Input

	ToJobIAMBindingMapOutput() JobIAMBindingMapOutput
	ToJobIAMBindingMapOutputWithContext(context.Context) JobIAMBindingMapOutput
}

JobIAMBindingMapInput is an input type that accepts JobIAMBindingMap and JobIAMBindingMapOutput values. You can construct a concrete instance of `JobIAMBindingMapInput` via:

JobIAMBindingMap{ "key": JobIAMBindingArgs{...} }

type JobIAMBindingMapOutput

type JobIAMBindingMapOutput struct{ *pulumi.OutputState }

func (JobIAMBindingMapOutput) ElementType

func (JobIAMBindingMapOutput) ElementType() reflect.Type

func (JobIAMBindingMapOutput) MapIndex

func (JobIAMBindingMapOutput) ToJobIAMBindingMapOutput

func (o JobIAMBindingMapOutput) ToJobIAMBindingMapOutput() JobIAMBindingMapOutput

func (JobIAMBindingMapOutput) ToJobIAMBindingMapOutputWithContext

func (o JobIAMBindingMapOutput) ToJobIAMBindingMapOutputWithContext(ctx context.Context) JobIAMBindingMapOutput

type JobIAMBindingOutput

type JobIAMBindingOutput struct{ *pulumi.OutputState }

func (JobIAMBindingOutput) ElementType

func (JobIAMBindingOutput) ElementType() reflect.Type

func (JobIAMBindingOutput) ToJobIAMBindingOutput

func (o JobIAMBindingOutput) ToJobIAMBindingOutput() JobIAMBindingOutput

func (JobIAMBindingOutput) ToJobIAMBindingOutputWithContext

func (o JobIAMBindingOutput) ToJobIAMBindingOutputWithContext(ctx context.Context) JobIAMBindingOutput

func (JobIAMBindingOutput) ToJobIAMBindingPtrOutput

func (o JobIAMBindingOutput) ToJobIAMBindingPtrOutput() JobIAMBindingPtrOutput

func (JobIAMBindingOutput) ToJobIAMBindingPtrOutputWithContext

func (o JobIAMBindingOutput) ToJobIAMBindingPtrOutputWithContext(ctx context.Context) JobIAMBindingPtrOutput

type JobIAMBindingPtrInput

type JobIAMBindingPtrInput interface {
	pulumi.Input

	ToJobIAMBindingPtrOutput() JobIAMBindingPtrOutput
	ToJobIAMBindingPtrOutputWithContext(ctx context.Context) JobIAMBindingPtrOutput
}

type JobIAMBindingPtrOutput

type JobIAMBindingPtrOutput struct{ *pulumi.OutputState }

func (JobIAMBindingPtrOutput) Elem added in v5.21.0

func (JobIAMBindingPtrOutput) ElementType

func (JobIAMBindingPtrOutput) ElementType() reflect.Type

func (JobIAMBindingPtrOutput) ToJobIAMBindingPtrOutput

func (o JobIAMBindingPtrOutput) ToJobIAMBindingPtrOutput() JobIAMBindingPtrOutput

func (JobIAMBindingPtrOutput) ToJobIAMBindingPtrOutputWithContext

func (o JobIAMBindingPtrOutput) ToJobIAMBindingPtrOutputWithContext(ctx context.Context) JobIAMBindingPtrOutput

type JobIAMBindingState

type JobIAMBindingState struct {
	Condition JobIAMBindingConditionPtrInput
	// (Computed) The etag of the job's IAM policy.
	Etag    pulumi.StringPtrInput
	JobId   pulumi.StringPtrInput
	Members pulumi.StringArrayInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (JobIAMBindingState) ElementType

func (JobIAMBindingState) ElementType() reflect.Type

type JobIAMMember

type JobIAMMember struct {
	pulumi.CustomResourceState

	Condition JobIAMMemberConditionPtrOutput `pulumi:"condition"`
	// (Computed) The etag of the job's IAM policy.
	Etag   pulumi.StringOutput `pulumi:"etag"`
	JobId  pulumi.StringOutput `pulumi:"jobId"`
	Member pulumi.StringOutput `pulumi:"member"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringOutput `pulumi:"role"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_job\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewJobIAMPolicy(ctx, "editor", &dataproc.JobIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			JobId:      pulumi.String("your-dataproc-job"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMBinding(ctx, "editor", &dataproc.JobIAMBindingArgs{
			JobId: pulumi.String("your-dataproc-job"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMMember(ctx, "editor", &dataproc.JobIAMMemberArgs{
			JobId:  pulumi.String("your-dataproc-job"),
			Member: pulumi.String("user:jane@example.com"),
			Role:   pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Job IAM resources can be imported using the project, region, job id, role and/or member.

```sh

$ pulumi import gcp:dataproc/jobIAMMember:JobIAMMember editor "projects/{project}/regions/{region}/jobs/{job_id}"

```

```sh

$ pulumi import gcp:dataproc/jobIAMMember:JobIAMMember editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/jobIAMMember:JobIAMMember editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor user:jane@example.com"

```

-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetJobIAMMember

func GetJobIAMMember(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMMemberState, opts ...pulumi.ResourceOption) (*JobIAMMember, error)

GetJobIAMMember gets an existing JobIAMMember resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMMember

func NewJobIAMMember(ctx *pulumi.Context,
	name string, args *JobIAMMemberArgs, opts ...pulumi.ResourceOption) (*JobIAMMember, error)

NewJobIAMMember registers a new resource with the given unique name, arguments, and options.

func (*JobIAMMember) ElementType

func (*JobIAMMember) ElementType() reflect.Type

func (*JobIAMMember) ToJobIAMMemberOutput

func (i *JobIAMMember) ToJobIAMMemberOutput() JobIAMMemberOutput

func (*JobIAMMember) ToJobIAMMemberOutputWithContext

func (i *JobIAMMember) ToJobIAMMemberOutputWithContext(ctx context.Context) JobIAMMemberOutput

func (*JobIAMMember) ToJobIAMMemberPtrOutput

func (i *JobIAMMember) ToJobIAMMemberPtrOutput() JobIAMMemberPtrOutput

func (*JobIAMMember) ToJobIAMMemberPtrOutputWithContext

func (i *JobIAMMember) ToJobIAMMemberPtrOutputWithContext(ctx context.Context) JobIAMMemberPtrOutput

type JobIAMMemberArgs

type JobIAMMemberArgs struct {
	Condition JobIAMMemberConditionPtrInput
	JobId     pulumi.StringInput
	Member    pulumi.StringInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringInput
}

The set of arguments for constructing a JobIAMMember resource.

func (JobIAMMemberArgs) ElementType

func (JobIAMMemberArgs) ElementType() reflect.Type

type JobIAMMemberArray

type JobIAMMemberArray []JobIAMMemberInput

func (JobIAMMemberArray) ElementType

func (JobIAMMemberArray) ElementType() reflect.Type

func (JobIAMMemberArray) ToJobIAMMemberArrayOutput

func (i JobIAMMemberArray) ToJobIAMMemberArrayOutput() JobIAMMemberArrayOutput

func (JobIAMMemberArray) ToJobIAMMemberArrayOutputWithContext

func (i JobIAMMemberArray) ToJobIAMMemberArrayOutputWithContext(ctx context.Context) JobIAMMemberArrayOutput

type JobIAMMemberArrayInput

type JobIAMMemberArrayInput interface {
	pulumi.Input

	ToJobIAMMemberArrayOutput() JobIAMMemberArrayOutput
	ToJobIAMMemberArrayOutputWithContext(context.Context) JobIAMMemberArrayOutput
}

JobIAMMemberArrayInput is an input type that accepts JobIAMMemberArray and JobIAMMemberArrayOutput values. You can construct a concrete instance of `JobIAMMemberArrayInput` via:

JobIAMMemberArray{ JobIAMMemberArgs{...} }

type JobIAMMemberArrayOutput

type JobIAMMemberArrayOutput struct{ *pulumi.OutputState }

func (JobIAMMemberArrayOutput) ElementType

func (JobIAMMemberArrayOutput) ElementType() reflect.Type

func (JobIAMMemberArrayOutput) Index

func (JobIAMMemberArrayOutput) ToJobIAMMemberArrayOutput

func (o JobIAMMemberArrayOutput) ToJobIAMMemberArrayOutput() JobIAMMemberArrayOutput

func (JobIAMMemberArrayOutput) ToJobIAMMemberArrayOutputWithContext

func (o JobIAMMemberArrayOutput) ToJobIAMMemberArrayOutputWithContext(ctx context.Context) JobIAMMemberArrayOutput

type JobIAMMemberCondition

type JobIAMMemberCondition struct {
	Description *string `pulumi:"description"`
	Expression  string  `pulumi:"expression"`
	Title       string  `pulumi:"title"`
}

type JobIAMMemberConditionArgs

type JobIAMMemberConditionArgs struct {
	Description pulumi.StringPtrInput `pulumi:"description"`
	Expression  pulumi.StringInput    `pulumi:"expression"`
	Title       pulumi.StringInput    `pulumi:"title"`
}

func (JobIAMMemberConditionArgs) ElementType

func (JobIAMMemberConditionArgs) ElementType() reflect.Type

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutput

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutputWithContext

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionOutputWithContext(ctx context.Context) JobIAMMemberConditionOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutput

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutputWithContext

func (i JobIAMMemberConditionArgs) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionInput

type JobIAMMemberConditionInput interface {
	pulumi.Input

	ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput
	ToJobIAMMemberConditionOutputWithContext(context.Context) JobIAMMemberConditionOutput
}

JobIAMMemberConditionInput is an input type that accepts JobIAMMemberConditionArgs and JobIAMMemberConditionOutput values. You can construct a concrete instance of `JobIAMMemberConditionInput` via:

JobIAMMemberConditionArgs{...}

type JobIAMMemberConditionOutput

type JobIAMMemberConditionOutput struct{ *pulumi.OutputState }

func (JobIAMMemberConditionOutput) Description

func (JobIAMMemberConditionOutput) ElementType

func (JobIAMMemberConditionOutput) Expression

func (JobIAMMemberConditionOutput) Title

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutput

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutput() JobIAMMemberConditionOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutputWithContext

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionOutputWithContext(ctx context.Context) JobIAMMemberConditionOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutput

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutputWithContext

func (o JobIAMMemberConditionOutput) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionPtrInput

type JobIAMMemberConditionPtrInput interface {
	pulumi.Input

	ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput
	ToJobIAMMemberConditionPtrOutputWithContext(context.Context) JobIAMMemberConditionPtrOutput
}

JobIAMMemberConditionPtrInput is an input type that accepts JobIAMMemberConditionArgs, JobIAMMemberConditionPtr and JobIAMMemberConditionPtrOutput values. You can construct a concrete instance of `JobIAMMemberConditionPtrInput` via:

        JobIAMMemberConditionArgs{...}

or:

        nil

type JobIAMMemberConditionPtrOutput

type JobIAMMemberConditionPtrOutput struct{ *pulumi.OutputState }

func (JobIAMMemberConditionPtrOutput) Description

func (JobIAMMemberConditionPtrOutput) Elem

func (JobIAMMemberConditionPtrOutput) ElementType

func (JobIAMMemberConditionPtrOutput) Expression

func (JobIAMMemberConditionPtrOutput) Title

func (JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutput

func (o JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutput() JobIAMMemberConditionPtrOutput

func (JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutputWithContext

func (o JobIAMMemberConditionPtrOutput) ToJobIAMMemberConditionPtrOutputWithContext(ctx context.Context) JobIAMMemberConditionPtrOutput

type JobIAMMemberInput

type JobIAMMemberInput interface {
	pulumi.Input

	ToJobIAMMemberOutput() JobIAMMemberOutput
	ToJobIAMMemberOutputWithContext(ctx context.Context) JobIAMMemberOutput
}

type JobIAMMemberMap

type JobIAMMemberMap map[string]JobIAMMemberInput

func (JobIAMMemberMap) ElementType

func (JobIAMMemberMap) ElementType() reflect.Type

func (JobIAMMemberMap) ToJobIAMMemberMapOutput

func (i JobIAMMemberMap) ToJobIAMMemberMapOutput() JobIAMMemberMapOutput

func (JobIAMMemberMap) ToJobIAMMemberMapOutputWithContext

func (i JobIAMMemberMap) ToJobIAMMemberMapOutputWithContext(ctx context.Context) JobIAMMemberMapOutput

type JobIAMMemberMapInput

type JobIAMMemberMapInput interface {
	pulumi.Input

	ToJobIAMMemberMapOutput() JobIAMMemberMapOutput
	ToJobIAMMemberMapOutputWithContext(context.Context) JobIAMMemberMapOutput
}

JobIAMMemberMapInput is an input type that accepts JobIAMMemberMap and JobIAMMemberMapOutput values. You can construct a concrete instance of `JobIAMMemberMapInput` via:

JobIAMMemberMap{ "key": JobIAMMemberArgs{...} }

type JobIAMMemberMapOutput

type JobIAMMemberMapOutput struct{ *pulumi.OutputState }

func (JobIAMMemberMapOutput) ElementType

func (JobIAMMemberMapOutput) ElementType() reflect.Type

func (JobIAMMemberMapOutput) MapIndex

func (JobIAMMemberMapOutput) ToJobIAMMemberMapOutput

func (o JobIAMMemberMapOutput) ToJobIAMMemberMapOutput() JobIAMMemberMapOutput

func (JobIAMMemberMapOutput) ToJobIAMMemberMapOutputWithContext

func (o JobIAMMemberMapOutput) ToJobIAMMemberMapOutputWithContext(ctx context.Context) JobIAMMemberMapOutput

type JobIAMMemberOutput

type JobIAMMemberOutput struct{ *pulumi.OutputState }

func (JobIAMMemberOutput) ElementType

func (JobIAMMemberOutput) ElementType() reflect.Type

func (JobIAMMemberOutput) ToJobIAMMemberOutput

func (o JobIAMMemberOutput) ToJobIAMMemberOutput() JobIAMMemberOutput

func (JobIAMMemberOutput) ToJobIAMMemberOutputWithContext

func (o JobIAMMemberOutput) ToJobIAMMemberOutputWithContext(ctx context.Context) JobIAMMemberOutput

func (JobIAMMemberOutput) ToJobIAMMemberPtrOutput

func (o JobIAMMemberOutput) ToJobIAMMemberPtrOutput() JobIAMMemberPtrOutput

func (JobIAMMemberOutput) ToJobIAMMemberPtrOutputWithContext

func (o JobIAMMemberOutput) ToJobIAMMemberPtrOutputWithContext(ctx context.Context) JobIAMMemberPtrOutput

type JobIAMMemberPtrInput

type JobIAMMemberPtrInput interface {
	pulumi.Input

	ToJobIAMMemberPtrOutput() JobIAMMemberPtrOutput
	ToJobIAMMemberPtrOutputWithContext(ctx context.Context) JobIAMMemberPtrOutput
}

type JobIAMMemberPtrOutput

type JobIAMMemberPtrOutput struct{ *pulumi.OutputState }

func (JobIAMMemberPtrOutput) Elem added in v5.21.0

func (JobIAMMemberPtrOutput) ElementType

func (JobIAMMemberPtrOutput) ElementType() reflect.Type

func (JobIAMMemberPtrOutput) ToJobIAMMemberPtrOutput

func (o JobIAMMemberPtrOutput) ToJobIAMMemberPtrOutput() JobIAMMemberPtrOutput

func (JobIAMMemberPtrOutput) ToJobIAMMemberPtrOutputWithContext

func (o JobIAMMemberPtrOutput) ToJobIAMMemberPtrOutputWithContext(ctx context.Context) JobIAMMemberPtrOutput

type JobIAMMemberState

type JobIAMMemberState struct {
	Condition JobIAMMemberConditionPtrInput
	// (Computed) The etag of the job's IAM policy.
	Etag   pulumi.StringPtrInput
	JobId  pulumi.StringPtrInput
	Member pulumi.StringPtrInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
	// The role that should be applied. Only one
	// `dataproc.JobIAMBinding` can be used per role. Note that custom roles must be of the format
	// `[projects|organizations]/{parent-name}/roles/{role-name}`.
	Role pulumi.StringPtrInput
}

func (JobIAMMemberState) ElementType

func (JobIAMMemberState) ElementType() reflect.Type

type JobIAMPolicy

type JobIAMPolicy struct {
	pulumi.CustomResourceState

	// (Computed) The etag of the job's IAM policy.
	Etag  pulumi.StringOutput `pulumi:"etag"`
	JobId pulumi.StringOutput `pulumi:"jobId"`
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringOutput `pulumi:"policyData"`
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringOutput `pulumi:"project"`
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringOutput `pulumi:"region"`
}

Three different resources help you manage IAM policies on dataproc jobs. Each of these resources serves a different use case:

* `dataproc.JobIAMPolicy`: Authoritative. Sets the IAM policy for the job and replaces any existing policy already attached.
* `dataproc.JobIAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the job are preserved.
* `dataproc.JobIAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the job are preserved.

> **Note:** `dataproc.JobIAMPolicy` **cannot** be used in conjunction with `dataproc.JobIAMBinding` and `dataproc.JobIAMMember` or they will fight over what your policy should be. In addition, be careful not to accidentally unset ownership of the job as `dataproc.JobIAMPolicy` replaces the entire policy.

> **Note:** `dataproc.JobIAMBinding` resources **can be** used in conjunction with `dataproc.JobIAMMember` resources **only if** they do not grant privilege to the same role.

## google\_dataproc\_job\_iam\_policy

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
			Bindings: []organizations.GetIAMPolicyBinding{
				organizations.GetIAMPolicyBinding{
					Role: "roles/editor",
					Members: []string{
						"user:jane@example.com",
					},
				},
			},
		}, nil)
		if err != nil {
			return err
		}
		_, err = dataproc.NewJobIAMPolicy(ctx, "editor", &dataproc.JobIAMPolicyArgs{
			Project:    pulumi.String("your-project"),
			Region:     pulumi.String("your-region"),
			JobId:      pulumi.String("your-dataproc-job"),
			PolicyData: pulumi.String(admin.PolicyData),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_binding

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMBinding(ctx, "editor", &dataproc.JobIAMBindingArgs{
			JobId: pulumi.String("your-dataproc-job"),
			Members: pulumi.StringArray{
				pulumi.String("user:jane@example.com"),
			},
			Role: pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## google\_dataproc\_job\_iam\_member

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewJobIAMMember(ctx, "editor", &dataproc.JobIAMMemberArgs{
			JobId:  pulumi.String("your-dataproc-job"),
			Member: pulumi.String("user:jane@example.com"),
			Role:   pulumi.String("roles/editor"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Job IAM resources can be imported using the project, region, job id, role and/or member.

```sh

$ pulumi import gcp:dataproc/jobIAMPolicy:JobIAMPolicy editor "projects/{project}/regions/{region}/jobs/{job_id}"

```

```sh

$ pulumi import gcp:dataproc/jobIAMPolicy:JobIAMPolicy editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor"

```

```sh

$ pulumi import gcp:dataproc/jobIAMPolicy:JobIAMPolicy editor "projects/{project}/regions/{region}/jobs/{job_id} roles/editor user:jane@example.com"

```

-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.

func GetJobIAMPolicy

func GetJobIAMPolicy(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *JobIAMPolicyState, opts ...pulumi.ResourceOption) (*JobIAMPolicy, error)

GetJobIAMPolicy gets an existing JobIAMPolicy resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).

func NewJobIAMPolicy

func NewJobIAMPolicy(ctx *pulumi.Context,
	name string, args *JobIAMPolicyArgs, opts ...pulumi.ResourceOption) (*JobIAMPolicy, error)

NewJobIAMPolicy registers a new resource with the given unique name, arguments, and options.

func (*JobIAMPolicy) ElementType

func (*JobIAMPolicy) ElementType() reflect.Type

func (*JobIAMPolicy) ToJobIAMPolicyOutput

func (i *JobIAMPolicy) ToJobIAMPolicyOutput() JobIAMPolicyOutput

func (*JobIAMPolicy) ToJobIAMPolicyOutputWithContext

func (i *JobIAMPolicy) ToJobIAMPolicyOutputWithContext(ctx context.Context) JobIAMPolicyOutput

func (*JobIAMPolicy) ToJobIAMPolicyPtrOutput

func (i *JobIAMPolicy) ToJobIAMPolicyPtrOutput() JobIAMPolicyPtrOutput

func (*JobIAMPolicy) ToJobIAMPolicyPtrOutputWithContext

func (i *JobIAMPolicy) ToJobIAMPolicyPtrOutputWithContext(ctx context.Context) JobIAMPolicyPtrOutput

type JobIAMPolicyArgs

type JobIAMPolicyArgs struct {
	JobId pulumi.StringInput
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

The set of arguments for constructing a JobIAMPolicy resource.

func (JobIAMPolicyArgs) ElementType

func (JobIAMPolicyArgs) ElementType() reflect.Type

type JobIAMPolicyArray

type JobIAMPolicyArray []JobIAMPolicyInput

func (JobIAMPolicyArray) ElementType

func (JobIAMPolicyArray) ElementType() reflect.Type

func (JobIAMPolicyArray) ToJobIAMPolicyArrayOutput

func (i JobIAMPolicyArray) ToJobIAMPolicyArrayOutput() JobIAMPolicyArrayOutput

func (JobIAMPolicyArray) ToJobIAMPolicyArrayOutputWithContext

func (i JobIAMPolicyArray) ToJobIAMPolicyArrayOutputWithContext(ctx context.Context) JobIAMPolicyArrayOutput

type JobIAMPolicyArrayInput

type JobIAMPolicyArrayInput interface {
	pulumi.Input

	ToJobIAMPolicyArrayOutput() JobIAMPolicyArrayOutput
	ToJobIAMPolicyArrayOutputWithContext(context.Context) JobIAMPolicyArrayOutput
}

JobIAMPolicyArrayInput is an input type that accepts JobIAMPolicyArray and JobIAMPolicyArrayOutput values. You can construct a concrete instance of `JobIAMPolicyArrayInput` via:

JobIAMPolicyArray{ JobIAMPolicyArgs{...} }

type JobIAMPolicyArrayOutput

type JobIAMPolicyArrayOutput struct{ *pulumi.OutputState }

func (JobIAMPolicyArrayOutput) ElementType

func (JobIAMPolicyArrayOutput) ElementType() reflect.Type

func (JobIAMPolicyArrayOutput) Index

func (JobIAMPolicyArrayOutput) ToJobIAMPolicyArrayOutput

func (o JobIAMPolicyArrayOutput) ToJobIAMPolicyArrayOutput() JobIAMPolicyArrayOutput

func (JobIAMPolicyArrayOutput) ToJobIAMPolicyArrayOutputWithContext

func (o JobIAMPolicyArrayOutput) ToJobIAMPolicyArrayOutputWithContext(ctx context.Context) JobIAMPolicyArrayOutput

type JobIAMPolicyInput

type JobIAMPolicyInput interface {
	pulumi.Input

	ToJobIAMPolicyOutput() JobIAMPolicyOutput
	ToJobIAMPolicyOutputWithContext(ctx context.Context) JobIAMPolicyOutput
}

type JobIAMPolicyMap

type JobIAMPolicyMap map[string]JobIAMPolicyInput

func (JobIAMPolicyMap) ElementType

func (JobIAMPolicyMap) ElementType() reflect.Type

func (JobIAMPolicyMap) ToJobIAMPolicyMapOutput

func (i JobIAMPolicyMap) ToJobIAMPolicyMapOutput() JobIAMPolicyMapOutput

func (JobIAMPolicyMap) ToJobIAMPolicyMapOutputWithContext

func (i JobIAMPolicyMap) ToJobIAMPolicyMapOutputWithContext(ctx context.Context) JobIAMPolicyMapOutput

type JobIAMPolicyMapInput

type JobIAMPolicyMapInput interface {
	pulumi.Input

	ToJobIAMPolicyMapOutput() JobIAMPolicyMapOutput
	ToJobIAMPolicyMapOutputWithContext(context.Context) JobIAMPolicyMapOutput
}

JobIAMPolicyMapInput is an input type that accepts JobIAMPolicyMap and JobIAMPolicyMapOutput values. You can construct a concrete instance of `JobIAMPolicyMapInput` via:

JobIAMPolicyMap{ "key": JobIAMPolicyArgs{...} }

type JobIAMPolicyMapOutput

type JobIAMPolicyMapOutput struct{ *pulumi.OutputState }

func (JobIAMPolicyMapOutput) ElementType

func (JobIAMPolicyMapOutput) ElementType() reflect.Type

func (JobIAMPolicyMapOutput) MapIndex

func (JobIAMPolicyMapOutput) ToJobIAMPolicyMapOutput

func (o JobIAMPolicyMapOutput) ToJobIAMPolicyMapOutput() JobIAMPolicyMapOutput

func (JobIAMPolicyMapOutput) ToJobIAMPolicyMapOutputWithContext

func (o JobIAMPolicyMapOutput) ToJobIAMPolicyMapOutputWithContext(ctx context.Context) JobIAMPolicyMapOutput

type JobIAMPolicyOutput

type JobIAMPolicyOutput struct{ *pulumi.OutputState }

func (JobIAMPolicyOutput) ElementType

func (JobIAMPolicyOutput) ElementType() reflect.Type

func (JobIAMPolicyOutput) ToJobIAMPolicyOutput

func (o JobIAMPolicyOutput) ToJobIAMPolicyOutput() JobIAMPolicyOutput

func (JobIAMPolicyOutput) ToJobIAMPolicyOutputWithContext

func (o JobIAMPolicyOutput) ToJobIAMPolicyOutputWithContext(ctx context.Context) JobIAMPolicyOutput

func (JobIAMPolicyOutput) ToJobIAMPolicyPtrOutput

func (o JobIAMPolicyOutput) ToJobIAMPolicyPtrOutput() JobIAMPolicyPtrOutput

func (JobIAMPolicyOutput) ToJobIAMPolicyPtrOutputWithContext

func (o JobIAMPolicyOutput) ToJobIAMPolicyPtrOutputWithContext(ctx context.Context) JobIAMPolicyPtrOutput

type JobIAMPolicyPtrInput

type JobIAMPolicyPtrInput interface {
	pulumi.Input

	ToJobIAMPolicyPtrOutput() JobIAMPolicyPtrOutput
	ToJobIAMPolicyPtrOutputWithContext(ctx context.Context) JobIAMPolicyPtrOutput
}

type JobIAMPolicyPtrOutput

type JobIAMPolicyPtrOutput struct{ *pulumi.OutputState }

func (JobIAMPolicyPtrOutput) Elem added in v5.21.0

func (JobIAMPolicyPtrOutput) ElementType

func (JobIAMPolicyPtrOutput) ElementType() reflect.Type

func (JobIAMPolicyPtrOutput) ToJobIAMPolicyPtrOutput

func (o JobIAMPolicyPtrOutput) ToJobIAMPolicyPtrOutput() JobIAMPolicyPtrOutput

func (JobIAMPolicyPtrOutput) ToJobIAMPolicyPtrOutputWithContext

func (o JobIAMPolicyPtrOutput) ToJobIAMPolicyPtrOutputWithContext(ctx context.Context) JobIAMPolicyPtrOutput

type JobIAMPolicyState

type JobIAMPolicyState struct {
	// (Computed) The etag of the job's IAM policy.
	Etag  pulumi.StringPtrInput
	JobId pulumi.StringPtrInput
	// The policy data generated by a `organizations.getIAMPolicy` data source.
	PolicyData pulumi.StringPtrInput
	// The project in which the job belongs. If it
	// is not provided, the provider will use a default.
	Project pulumi.StringPtrInput
	// The region in which the job belongs. If it
	// is not provided, the provider will use a default.
	Region pulumi.StringPtrInput
}

func (JobIAMPolicyState) ElementType

func (JobIAMPolicyState) ElementType() reflect.Type

type JobInput

type JobInput interface {
	pulumi.Input

	ToJobOutput() JobOutput
	ToJobOutputWithContext(ctx context.Context) JobOutput
}

type JobMap

type JobMap map[string]JobInput

func (JobMap) ElementType

func (JobMap) ElementType() reflect.Type

func (JobMap) ToJobMapOutput

func (i JobMap) ToJobMapOutput() JobMapOutput

func (JobMap) ToJobMapOutputWithContext

func (i JobMap) ToJobMapOutputWithContext(ctx context.Context) JobMapOutput

type JobMapInput

type JobMapInput interface {
	pulumi.Input

	ToJobMapOutput() JobMapOutput
	ToJobMapOutputWithContext(context.Context) JobMapOutput
}

JobMapInput is an input type that accepts JobMap and JobMapOutput values. You can construct a concrete instance of `JobMapInput` via:

JobMap{ "key": JobArgs{...} }

type JobMapOutput

type JobMapOutput struct{ *pulumi.OutputState }

func (JobMapOutput) ElementType

func (JobMapOutput) ElementType() reflect.Type

func (JobMapOutput) MapIndex

func (o JobMapOutput) MapIndex(k pulumi.StringInput) JobOutput

func (JobMapOutput) ToJobMapOutput

func (o JobMapOutput) ToJobMapOutput() JobMapOutput

func (JobMapOutput) ToJobMapOutputWithContext

func (o JobMapOutput) ToJobMapOutputWithContext(ctx context.Context) JobMapOutput

type JobOutput

type JobOutput struct{ *pulumi.OutputState }

func (JobOutput) ElementType

func (JobOutput) ElementType() reflect.Type

func (JobOutput) ToJobOutput

func (o JobOutput) ToJobOutput() JobOutput

func (JobOutput) ToJobOutputWithContext

func (o JobOutput) ToJobOutputWithContext(ctx context.Context) JobOutput

func (JobOutput) ToJobPtrOutput

func (o JobOutput) ToJobPtrOutput() JobPtrOutput

func (JobOutput) ToJobPtrOutputWithContext

func (o JobOutput) ToJobPtrOutputWithContext(ctx context.Context) JobPtrOutput

type JobPigConfig

type JobPigConfig struct {
	// Whether to continue executing queries if a query fails. Setting to true can be useful when executing independent parallel queries. Defaults to false.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                   `pulumi:"jarFileUris"`
	LoggingConfig *JobPigConfigLoggingConfig `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`
	QueryFileUri *string `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`
	QueryLists []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}
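
The sketch below is illustrative rather than generated: it assumes `dataproc.JobArgs` exposes `Region`, `Placement`, and `PigConfig` fields matching the nested types documented here, and the cluster name, region, and query are placeholders.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Create a cluster to run the Pig job on; name and region are placeholders.
		cluster, err := dataproc.NewCluster(ctx, "pig-cluster", &dataproc.ClusterArgs{
			Region: pulumi.String("us-central1"),
		})
		if err != nil {
			return err
		}
		// Submit a Pig job to the cluster above. The JobArgs field names
		// (Placement, PigConfig) are assumed from the nested types in this package.
		_, err = dataproc.NewJob(ctx, "pig", &dataproc.JobArgs{
			Region: cluster.Region,
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: cluster.Name,
			},
			PigConfig: &dataproc.JobPigConfigArgs{
				ContinueOnFailure: pulumi.Bool(true),
				QueryLists: pulumi.StringArray{
					pulumi.String("LS;"),
				},
				LoggingConfig: &dataproc.JobPigConfigLoggingConfigArgs{
					DriverLogLevels: pulumi.StringMap{
						"root": pulumi.String("INFO"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```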

type JobPigConfigArgs

type JobPigConfigArgs struct {
	// Whether to continue executing queries if a query fails. Setting to true can be useful when executing independent parallel queries. Defaults to false.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput           `pulumi:"jarFileUris"`
	LoggingConfig JobPigConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`
	QueryLists pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

func (JobPigConfigArgs) ElementType

func (JobPigConfigArgs) ElementType() reflect.Type

func (JobPigConfigArgs) ToJobPigConfigOutput

func (i JobPigConfigArgs) ToJobPigConfigOutput() JobPigConfigOutput

func (JobPigConfigArgs) ToJobPigConfigOutputWithContext

func (i JobPigConfigArgs) ToJobPigConfigOutputWithContext(ctx context.Context) JobPigConfigOutput

func (JobPigConfigArgs) ToJobPigConfigPtrOutput

func (i JobPigConfigArgs) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigArgs) ToJobPigConfigPtrOutputWithContext

func (i JobPigConfigArgs) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPigConfigInput

type JobPigConfigInput interface {
	pulumi.Input

	ToJobPigConfigOutput() JobPigConfigOutput
	ToJobPigConfigOutputWithContext(context.Context) JobPigConfigOutput
}

JobPigConfigInput is an input type that accepts JobPigConfigArgs and JobPigConfigOutput values. You can construct a concrete instance of `JobPigConfigInput` via:

JobPigConfigArgs{...}

type JobPigConfigLoggingConfig

type JobPigConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobPigConfigLoggingConfigArgs

type JobPigConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobPigConfigLoggingConfigArgs) ElementType

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutput

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutputWithContext

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutput

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (i JobPigConfigLoggingConfigArgs) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigInput

type JobPigConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput
	ToJobPigConfigLoggingConfigOutputWithContext(context.Context) JobPigConfigLoggingConfigOutput
}

JobPigConfigLoggingConfigInput is an input type that accepts JobPigConfigLoggingConfigArgs and JobPigConfigLoggingConfigOutput values. You can construct a concrete instance of `JobPigConfigLoggingConfigInput` via:

JobPigConfigLoggingConfigArgs{...}

type JobPigConfigLoggingConfigOutput

type JobPigConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobPigConfigLoggingConfigOutput) DriverLogLevels

func (JobPigConfigLoggingConfigOutput) ElementType

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutput

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutput() JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutputWithContext

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutput

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (o JobPigConfigLoggingConfigOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigPtrInput

type JobPigConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput
	ToJobPigConfigLoggingConfigPtrOutputWithContext(context.Context) JobPigConfigLoggingConfigPtrOutput
}

JobPigConfigLoggingConfigPtrInput is an input type that accepts JobPigConfigLoggingConfigArgs, JobPigConfigLoggingConfigPtr and JobPigConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobPigConfigLoggingConfigPtrInput` via:

        JobPigConfigLoggingConfigArgs{...}

or:

        nil

type JobPigConfigLoggingConfigPtrOutput

type JobPigConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPigConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobPigConfigLoggingConfigPtrOutput) Elem

func (JobPigConfigLoggingConfigPtrOutput) ElementType

func (JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutput

func (o JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutput() JobPigConfigLoggingConfigPtrOutput

func (JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext

func (o JobPigConfigLoggingConfigPtrOutput) ToJobPigConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPigConfigLoggingConfigPtrOutput

type JobPigConfigOutput

type JobPigConfigOutput struct{ *pulumi.OutputState }

func (JobPigConfigOutput) ContinueOnFailure

func (o JobPigConfigOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. Setting to true can be useful when executing independent parallel queries. Defaults to false.

func (JobPigConfigOutput) ElementType

func (JobPigConfigOutput) ElementType() reflect.Type

func (JobPigConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPigConfigOutput) LoggingConfig

func (JobPigConfigOutput) Properties

func (o JobPigConfigOutput) Properties() pulumi.StringMapOutput

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPigConfigOutput) QueryFileUri

func (o JobPigConfigOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`

func (JobPigConfigOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobPigConfigOutput) ScriptVariables

func (o JobPigConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobPigConfigOutput) ToJobPigConfigOutput

func (o JobPigConfigOutput) ToJobPigConfigOutput() JobPigConfigOutput

func (JobPigConfigOutput) ToJobPigConfigOutputWithContext

func (o JobPigConfigOutput) ToJobPigConfigOutputWithContext(ctx context.Context) JobPigConfigOutput

func (JobPigConfigOutput) ToJobPigConfigPtrOutput

func (o JobPigConfigOutput) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigOutput) ToJobPigConfigPtrOutputWithContext

func (o JobPigConfigOutput) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPigConfigPtrInput

type JobPigConfigPtrInput interface {
	pulumi.Input

	ToJobPigConfigPtrOutput() JobPigConfigPtrOutput
	ToJobPigConfigPtrOutputWithContext(context.Context) JobPigConfigPtrOutput
}

JobPigConfigPtrInput is an input type that accepts JobPigConfigArgs, JobPigConfigPtr and JobPigConfigPtrOutput values. You can construct a concrete instance of `JobPigConfigPtrInput` via:

        JobPigConfigArgs{...}

or:

        nil

type JobPigConfigPtrOutput

type JobPigConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPigConfigPtrOutput) ContinueOnFailure

func (o JobPigConfigPtrOutput) ContinueOnFailure() pulumi.BoolPtrOutput

Whether to continue executing queries if a query fails. Setting to true can be useful when executing independent parallel queries. Defaults to false.

func (JobPigConfigPtrOutput) Elem

func (JobPigConfigPtrOutput) ElementType

func (JobPigConfigPtrOutput) ElementType() reflect.Type

func (JobPigConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPigConfigPtrOutput) LoggingConfig

func (JobPigConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPigConfigPtrOutput) QueryFileUri

func (o JobPigConfigPtrOutput) QueryFileUri() pulumi.StringPtrOutput

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`

func (JobPigConfigPtrOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`

func (JobPigConfigPtrOutput) ScriptVariables

func (o JobPigConfigPtrOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobPigConfigPtrOutput) ToJobPigConfigPtrOutput

func (o JobPigConfigPtrOutput) ToJobPigConfigPtrOutput() JobPigConfigPtrOutput

func (JobPigConfigPtrOutput) ToJobPigConfigPtrOutputWithContext

func (o JobPigConfigPtrOutput) ToJobPigConfigPtrOutputWithContext(ctx context.Context) JobPigConfigPtrOutput

type JobPlacement

type JobPlacement struct {
	ClusterName string  `pulumi:"clusterName"`
	ClusterUuid *string `pulumi:"clusterUuid"`
}

type JobPlacementArgs

type JobPlacementArgs struct {
	ClusterName pulumi.StringInput    `pulumi:"clusterName"`
	ClusterUuid pulumi.StringPtrInput `pulumi:"clusterUuid"`
}

func (JobPlacementArgs) ElementType

func (JobPlacementArgs) ElementType() reflect.Type

func (JobPlacementArgs) ToJobPlacementOutput

func (i JobPlacementArgs) ToJobPlacementOutput() JobPlacementOutput

func (JobPlacementArgs) ToJobPlacementOutputWithContext

func (i JobPlacementArgs) ToJobPlacementOutputWithContext(ctx context.Context) JobPlacementOutput

func (JobPlacementArgs) ToJobPlacementPtrOutput

func (i JobPlacementArgs) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementArgs) ToJobPlacementPtrOutputWithContext

func (i JobPlacementArgs) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPlacementInput

type JobPlacementInput interface {
	pulumi.Input

	ToJobPlacementOutput() JobPlacementOutput
	ToJobPlacementOutputWithContext(context.Context) JobPlacementOutput
}

JobPlacementInput is an input type that accepts JobPlacementArgs and JobPlacementOutput values. You can construct a concrete instance of `JobPlacementInput` via:

JobPlacementArgs{...}

type JobPlacementOutput

type JobPlacementOutput struct{ *pulumi.OutputState }

func (JobPlacementOutput) ClusterName

func (o JobPlacementOutput) ClusterName() pulumi.StringOutput

func (JobPlacementOutput) ClusterUuid

func (o JobPlacementOutput) ClusterUuid() pulumi.StringPtrOutput

func (JobPlacementOutput) ElementType

func (JobPlacementOutput) ElementType() reflect.Type

func (JobPlacementOutput) ToJobPlacementOutput

func (o JobPlacementOutput) ToJobPlacementOutput() JobPlacementOutput

func (JobPlacementOutput) ToJobPlacementOutputWithContext

func (o JobPlacementOutput) ToJobPlacementOutputWithContext(ctx context.Context) JobPlacementOutput

func (JobPlacementOutput) ToJobPlacementPtrOutput

func (o JobPlacementOutput) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementOutput) ToJobPlacementPtrOutputWithContext

func (o JobPlacementOutput) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPlacementPtrInput

type JobPlacementPtrInput interface {
	pulumi.Input

	ToJobPlacementPtrOutput() JobPlacementPtrOutput
	ToJobPlacementPtrOutputWithContext(context.Context) JobPlacementPtrOutput
}

JobPlacementPtrInput is an input type that accepts JobPlacementArgs, JobPlacementPtr and JobPlacementPtrOutput values. You can construct a concrete instance of `JobPlacementPtrInput` via:

        JobPlacementArgs{...}

or:

        nil

type JobPlacementPtrOutput

type JobPlacementPtrOutput struct{ *pulumi.OutputState }

func (JobPlacementPtrOutput) ClusterName

func (JobPlacementPtrOutput) ClusterUuid

func (JobPlacementPtrOutput) Elem

func (JobPlacementPtrOutput) ElementType

func (JobPlacementPtrOutput) ElementType() reflect.Type

func (JobPlacementPtrOutput) ToJobPlacementPtrOutput

func (o JobPlacementPtrOutput) ToJobPlacementPtrOutput() JobPlacementPtrOutput

func (JobPlacementPtrOutput) ToJobPlacementPtrOutputWithContext

func (o JobPlacementPtrOutput) ToJobPlacementPtrOutputWithContext(ctx context.Context) JobPlacementPtrOutput

type JobPtrInput

type JobPtrInput interface {
	pulumi.Input

	ToJobPtrOutput() JobPtrOutput
	ToJobPtrOutputWithContext(ctx context.Context) JobPtrOutput
}

type JobPtrOutput

type JobPtrOutput struct{ *pulumi.OutputState }

func (JobPtrOutput) Elem added in v5.21.0

func (o JobPtrOutput) Elem() JobOutput

func (JobPtrOutput) ElementType

func (JobPtrOutput) ElementType() reflect.Type

func (JobPtrOutput) ToJobPtrOutput

func (o JobPtrOutput) ToJobPtrOutput() JobPtrOutput

func (JobPtrOutput) ToJobPtrOutputWithContext

func (o JobPtrOutput) ToJobPtrOutputWithContext(ctx context.Context) JobPtrOutput

type JobPysparkConfig

type JobPysparkConfig struct {
	// HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                       `pulumi:"jarFileUris"`
	LoggingConfig *JobPysparkConfigLoggingConfig `pulumi:"loggingConfig"`
	// The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri string `pulumi:"mainPythonFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `pulumi:"pythonFileUris"`
}
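
The sketch below is illustrative rather than generated: it assumes `dataproc.JobArgs` exposes a `PysparkConfig` field matching this type, and the cluster name, region, and GCS path are placeholders.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Submit a PySpark job to an existing cluster. The JobArgs field names
		// (Placement, PysparkConfig) are assumed from the nested types in this package.
		_, err := dataproc.NewJob(ctx, "pyspark", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("your-dataproc-cluster"),
			},
			PysparkConfig: &dataproc.JobPysparkConfigArgs{
				MainPythonFileUri: pulumi.String("gs://your-bucket/wordcount.py"),
				Properties: pulumi.StringMap{
					"spark.logConf": pulumi.String("true"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```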

type JobPysparkConfigArgs

type JobPysparkConfigArgs struct {
	// HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput               `pulumi:"jarFileUris"`
	LoggingConfig JobPysparkConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri pulumi.StringInput `pulumi:"mainPythonFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris pulumi.StringArrayInput `pulumi:"pythonFileUris"`
}

func (JobPysparkConfigArgs) ElementType

func (JobPysparkConfigArgs) ElementType() reflect.Type

func (JobPysparkConfigArgs) ToJobPysparkConfigOutput

func (i JobPysparkConfigArgs) ToJobPysparkConfigOutput() JobPysparkConfigOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigOutputWithContext

func (i JobPysparkConfigArgs) ToJobPysparkConfigOutputWithContext(ctx context.Context) JobPysparkConfigOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigPtrOutput

func (i JobPysparkConfigArgs) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigArgs) ToJobPysparkConfigPtrOutputWithContext

func (i JobPysparkConfigArgs) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobPysparkConfigInput

type JobPysparkConfigInput interface {
	pulumi.Input

	ToJobPysparkConfigOutput() JobPysparkConfigOutput
	ToJobPysparkConfigOutputWithContext(context.Context) JobPysparkConfigOutput
}

JobPysparkConfigInput is an input type that accepts JobPysparkConfigArgs and JobPysparkConfigOutput values. You can construct a concrete instance of `JobPysparkConfigInput` via:

JobPysparkConfigArgs{...}

type JobPysparkConfigLoggingConfig

type JobPysparkConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobPysparkConfigLoggingConfigArgs

type JobPysparkConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobPysparkConfigLoggingConfigArgs) ElementType

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutput

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutputWithContext

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutput

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (i JobPysparkConfigLoggingConfigArgs) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigInput

type JobPysparkConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput
	ToJobPysparkConfigLoggingConfigOutputWithContext(context.Context) JobPysparkConfigLoggingConfigOutput
}

JobPysparkConfigLoggingConfigInput is an input type that accepts JobPysparkConfigLoggingConfigArgs and JobPysparkConfigLoggingConfigOutput values. You can construct a concrete instance of `JobPysparkConfigLoggingConfigInput` via:

JobPysparkConfigLoggingConfigArgs{...}

type JobPysparkConfigLoggingConfigOutput

type JobPysparkConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigLoggingConfigOutput) DriverLogLevels

func (JobPysparkConfigLoggingConfigOutput) ElementType

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutput

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutput() JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutputWithContext

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutput

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (o JobPysparkConfigLoggingConfigOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigPtrInput

type JobPysparkConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput
	ToJobPysparkConfigLoggingConfigPtrOutputWithContext(context.Context) JobPysparkConfigLoggingConfigPtrOutput
}

JobPysparkConfigLoggingConfigPtrInput is an input type that accepts JobPysparkConfigLoggingConfigArgs, JobPysparkConfigLoggingConfigPtr and JobPysparkConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobPysparkConfigLoggingConfigPtrInput` via:

        JobPysparkConfigLoggingConfigArgs{...}

or:

        nil

type JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobPysparkConfigLoggingConfigPtrOutput) Elem

func (JobPysparkConfigLoggingConfigPtrOutput) ElementType

func (JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutput

func (o JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutput() JobPysparkConfigLoggingConfigPtrOutput

func (JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext

func (o JobPysparkConfigLoggingConfigPtrOutput) ToJobPysparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigLoggingConfigPtrOutput

type JobPysparkConfigOutput

type JobPysparkConfigOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobPysparkConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobPysparkConfigOutput) ElementType

func (JobPysparkConfigOutput) ElementType() reflect.Type

func (JobPysparkConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobPysparkConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPysparkConfigOutput) LoggingConfig

func (JobPysparkConfigOutput) MainPythonFileUri

func (o JobPysparkConfigOutput) MainPythonFileUri() pulumi.StringOutput

The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (JobPysparkConfigOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPysparkConfigOutput) PythonFileUris

func (o JobPysparkConfigOutput) PythonFileUris() pulumi.StringArrayOutput

HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (JobPysparkConfigOutput) ToJobPysparkConfigOutput

func (o JobPysparkConfigOutput) ToJobPysparkConfigOutput() JobPysparkConfigOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigOutputWithContext

func (o JobPysparkConfigOutput) ToJobPysparkConfigOutputWithContext(ctx context.Context) JobPysparkConfigOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigPtrOutput

func (o JobPysparkConfigOutput) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigOutput) ToJobPysparkConfigPtrOutputWithContext

func (o JobPysparkConfigOutput) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobPysparkConfigPtrInput

type JobPysparkConfigPtrInput interface {
	pulumi.Input

	ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput
	ToJobPysparkConfigPtrOutputWithContext(context.Context) JobPysparkConfigPtrOutput
}

JobPysparkConfigPtrInput is an input type that accepts JobPysparkConfigArgs, JobPysparkConfigPtr and JobPysparkConfigPtrOutput values. You can construct a concrete instance of `JobPysparkConfigPtrInput` via:

        JobPysparkConfigArgs{...}

or:

        nil

type JobPysparkConfigPtrOutput

type JobPysparkConfigPtrOutput struct{ *pulumi.OutputState }

func (JobPysparkConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobPysparkConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobPysparkConfigPtrOutput) Elem

func (JobPysparkConfigPtrOutput) ElementType

func (JobPysparkConfigPtrOutput) ElementType() reflect.Type

func (JobPysparkConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobPysparkConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobPysparkConfigPtrOutput) LoggingConfig

func (JobPysparkConfigPtrOutput) MainPythonFileUri

func (o JobPysparkConfigPtrOutput) MainPythonFileUri() pulumi.StringPtrOutput

The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (JobPysparkConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobPysparkConfigPtrOutput) PythonFileUris

HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutput

func (o JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutput() JobPysparkConfigPtrOutput

func (JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutputWithContext

func (o JobPysparkConfigPtrOutput) ToJobPysparkConfigPtrOutputWithContext(ctx context.Context) JobPysparkConfigPtrOutput

type JobReference

type JobReference struct {
	JobId *string `pulumi:"jobId"`
}

type JobReferenceArgs

type JobReferenceArgs struct {
	JobId pulumi.StringPtrInput `pulumi:"jobId"`
}

func (JobReferenceArgs) ElementType

func (JobReferenceArgs) ElementType() reflect.Type

func (JobReferenceArgs) ToJobReferenceOutput

func (i JobReferenceArgs) ToJobReferenceOutput() JobReferenceOutput

func (JobReferenceArgs) ToJobReferenceOutputWithContext

func (i JobReferenceArgs) ToJobReferenceOutputWithContext(ctx context.Context) JobReferenceOutput

func (JobReferenceArgs) ToJobReferencePtrOutput

func (i JobReferenceArgs) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferenceArgs) ToJobReferencePtrOutputWithContext

func (i JobReferenceArgs) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferenceInput

type JobReferenceInput interface {
	pulumi.Input

	ToJobReferenceOutput() JobReferenceOutput
	ToJobReferenceOutputWithContext(context.Context) JobReferenceOutput
}

JobReferenceInput is an input type that accepts JobReferenceArgs and JobReferenceOutput values. You can construct a concrete instance of `JobReferenceInput` via:

JobReferenceArgs{...}

type JobReferenceOutput

type JobReferenceOutput struct{ *pulumi.OutputState }

func (JobReferenceOutput) ElementType

func (JobReferenceOutput) ElementType() reflect.Type

func (JobReferenceOutput) JobId

func (JobReferenceOutput) ToJobReferenceOutput

func (o JobReferenceOutput) ToJobReferenceOutput() JobReferenceOutput

func (JobReferenceOutput) ToJobReferenceOutputWithContext

func (o JobReferenceOutput) ToJobReferenceOutputWithContext(ctx context.Context) JobReferenceOutput

func (JobReferenceOutput) ToJobReferencePtrOutput

func (o JobReferenceOutput) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferenceOutput) ToJobReferencePtrOutputWithContext

func (o JobReferenceOutput) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobReferencePtrInput

type JobReferencePtrInput interface {
	pulumi.Input

	ToJobReferencePtrOutput() JobReferencePtrOutput
	ToJobReferencePtrOutputWithContext(context.Context) JobReferencePtrOutput
}

JobReferencePtrInput is an input type that accepts JobReferenceArgs, JobReferencePtr and JobReferencePtrOutput values. You can construct a concrete instance of `JobReferencePtrInput` via:

        JobReferenceArgs{...}

or:

        nil

type JobReferencePtrOutput

type JobReferencePtrOutput struct{ *pulumi.OutputState }

func (JobReferencePtrOutput) Elem

func (JobReferencePtrOutput) ElementType

func (JobReferencePtrOutput) ElementType() reflect.Type

func (JobReferencePtrOutput) JobId

func (JobReferencePtrOutput) ToJobReferencePtrOutput

func (o JobReferencePtrOutput) ToJobReferencePtrOutput() JobReferencePtrOutput

func (JobReferencePtrOutput) ToJobReferencePtrOutputWithContext

func (o JobReferencePtrOutput) ToJobReferencePtrOutputWithContext(ctx context.Context) JobReferencePtrOutput

type JobScheduling

type JobScheduling struct {
	MaxFailuresPerHour int `pulumi:"maxFailuresPerHour"`
	MaxFailuresTotal   int `pulumi:"maxFailuresTotal"`
}

type JobSchedulingArgs

type JobSchedulingArgs struct {
	MaxFailuresPerHour pulumi.IntInput `pulumi:"maxFailuresPerHour"`
	MaxFailuresTotal   pulumi.IntInput `pulumi:"maxFailuresTotal"`
}

func (JobSchedulingArgs) ElementType

func (JobSchedulingArgs) ElementType() reflect.Type

func (JobSchedulingArgs) ToJobSchedulingOutput

func (i JobSchedulingArgs) ToJobSchedulingOutput() JobSchedulingOutput

func (JobSchedulingArgs) ToJobSchedulingOutputWithContext

func (i JobSchedulingArgs) ToJobSchedulingOutputWithContext(ctx context.Context) JobSchedulingOutput

func (JobSchedulingArgs) ToJobSchedulingPtrOutput

func (i JobSchedulingArgs) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingArgs) ToJobSchedulingPtrOutputWithContext

func (i JobSchedulingArgs) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingInput

type JobSchedulingInput interface {
	pulumi.Input

	ToJobSchedulingOutput() JobSchedulingOutput
	ToJobSchedulingOutputWithContext(context.Context) JobSchedulingOutput
}

JobSchedulingInput is an input type that accepts JobSchedulingArgs and JobSchedulingOutput values. You can construct a concrete instance of `JobSchedulingInput` via:

JobSchedulingArgs{...}

type JobSchedulingOutput

type JobSchedulingOutput struct{ *pulumi.OutputState }

func (JobSchedulingOutput) ElementType

func (JobSchedulingOutput) ElementType() reflect.Type

func (JobSchedulingOutput) MaxFailuresPerHour

func (o JobSchedulingOutput) MaxFailuresPerHour() pulumi.IntOutput

func (JobSchedulingOutput) MaxFailuresTotal

func (o JobSchedulingOutput) MaxFailuresTotal() pulumi.IntOutput

func (JobSchedulingOutput) ToJobSchedulingOutput

func (o JobSchedulingOutput) ToJobSchedulingOutput() JobSchedulingOutput

func (JobSchedulingOutput) ToJobSchedulingOutputWithContext

func (o JobSchedulingOutput) ToJobSchedulingOutputWithContext(ctx context.Context) JobSchedulingOutput

func (JobSchedulingOutput) ToJobSchedulingPtrOutput

func (o JobSchedulingOutput) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingOutput) ToJobSchedulingPtrOutputWithContext

func (o JobSchedulingOutput) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSchedulingPtrInput

type JobSchedulingPtrInput interface {
	pulumi.Input

	ToJobSchedulingPtrOutput() JobSchedulingPtrOutput
	ToJobSchedulingPtrOutputWithContext(context.Context) JobSchedulingPtrOutput
}

JobSchedulingPtrInput is an input type that accepts JobSchedulingArgs, JobSchedulingPtr and JobSchedulingPtrOutput values. You can construct a concrete instance of `JobSchedulingPtrInput` via:

        JobSchedulingArgs{...}

or:

        nil

type JobSchedulingPtrOutput

type JobSchedulingPtrOutput struct{ *pulumi.OutputState }

func (JobSchedulingPtrOutput) Elem

func (JobSchedulingPtrOutput) ElementType

func (JobSchedulingPtrOutput) ElementType() reflect.Type

func (JobSchedulingPtrOutput) MaxFailuresPerHour

func (o JobSchedulingPtrOutput) MaxFailuresPerHour() pulumi.IntPtrOutput

func (JobSchedulingPtrOutput) MaxFailuresTotal

func (o JobSchedulingPtrOutput) MaxFailuresTotal() pulumi.IntPtrOutput

func (JobSchedulingPtrOutput) ToJobSchedulingPtrOutput

func (o JobSchedulingPtrOutput) ToJobSchedulingPtrOutput() JobSchedulingPtrOutput

func (JobSchedulingPtrOutput) ToJobSchedulingPtrOutputWithContext

func (o JobSchedulingPtrOutput) ToJobSchedulingPtrOutputWithContext(ctx context.Context) JobSchedulingPtrOutput

type JobSparkConfig

type JobSparkConfig struct {
	// HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                     `pulumi:"jarFileUris"`
	LoggingConfig *JobSparkConfigLoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
}
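
The sketch below is illustrative rather than generated: it assumes `dataproc.JobArgs` exposes `SparkConfig` and `Scheduling` fields matching the nested types documented above, and the cluster name, jar path, and class are placeholders.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Submit a Spark job that runs the SparkPi example class and retries on
		// driver failure. The JobArgs field names (Placement, SparkConfig,
		// Scheduling) are assumed from the nested types in this package.
		_, err := dataproc.NewJob(ctx, "spark", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("your-dataproc-cluster"),
			},
			SparkConfig: &dataproc.JobSparkConfigArgs{
				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
				JarFileUris: pulumi.StringArray{
					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
				},
				Args: pulumi.StringArray{
					pulumi.String("1000"),
				},
			},
			Scheduling: &dataproc.JobSchedulingArgs{
				MaxFailuresPerHour: pulumi.Int(1),
				MaxFailuresTotal:   pulumi.Int(5),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

As noted in the field documentation above, `mainClass` and `mainJarFileUri` conflict, so only one of them should be set on a given job.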

type JobSparkConfigArgs

type JobSparkConfigArgs struct {
	// HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput             `pulumi:"jarFileUris"`
	LoggingConfig JobSparkConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

func (JobSparkConfigArgs) ElementType

func (JobSparkConfigArgs) ElementType() reflect.Type

func (JobSparkConfigArgs) ToJobSparkConfigOutput

func (i JobSparkConfigArgs) ToJobSparkConfigOutput() JobSparkConfigOutput

func (JobSparkConfigArgs) ToJobSparkConfigOutputWithContext

func (i JobSparkConfigArgs) ToJobSparkConfigOutputWithContext(ctx context.Context) JobSparkConfigOutput

func (JobSparkConfigArgs) ToJobSparkConfigPtrOutput

func (i JobSparkConfigArgs) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigArgs) ToJobSparkConfigPtrOutputWithContext

func (i JobSparkConfigArgs) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparkConfigInput

type JobSparkConfigInput interface {
	pulumi.Input

	ToJobSparkConfigOutput() JobSparkConfigOutput
	ToJobSparkConfigOutputWithContext(context.Context) JobSparkConfigOutput
}

JobSparkConfigInput is an input type that accepts JobSparkConfigArgs and JobSparkConfigOutput values. You can construct a concrete instance of `JobSparkConfigInput` via:

JobSparkConfigArgs{...}

type JobSparkConfigLoggingConfig

type JobSparkConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobSparkConfigLoggingConfigArgs

type JobSparkConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobSparkConfigLoggingConfigArgs) ElementType

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutput

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutputWithContext

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutput

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (i JobSparkConfigLoggingConfigArgs) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigInput

type JobSparkConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput
	ToJobSparkConfigLoggingConfigOutputWithContext(context.Context) JobSparkConfigLoggingConfigOutput
}

JobSparkConfigLoggingConfigInput is an input type that accepts JobSparkConfigLoggingConfigArgs and JobSparkConfigLoggingConfigOutput values. You can construct a concrete instance of `JobSparkConfigLoggingConfigInput` via:

JobSparkConfigLoggingConfigArgs{...}

type JobSparkConfigLoggingConfigOutput

type JobSparkConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobSparkConfigLoggingConfigOutput) DriverLogLevels

func (JobSparkConfigLoggingConfigOutput) ElementType

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutput

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutput() JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutputWithContext

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutput

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (o JobSparkConfigLoggingConfigOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigPtrInput

type JobSparkConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput
	ToJobSparkConfigLoggingConfigPtrOutputWithContext(context.Context) JobSparkConfigLoggingConfigPtrOutput
}

JobSparkConfigLoggingConfigPtrInput is an input type that accepts JobSparkConfigLoggingConfigArgs, JobSparkConfigLoggingConfigPtr and JobSparkConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobSparkConfigLoggingConfigPtrInput` via:

        JobSparkConfigLoggingConfigArgs{...}

or:

        nil

type JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparkConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobSparkConfigLoggingConfigPtrOutput) Elem

func (JobSparkConfigLoggingConfigPtrOutput) ElementType

func (JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutput

func (o JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutput() JobSparkConfigLoggingConfigPtrOutput

func (JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext

func (o JobSparkConfigLoggingConfigPtrOutput) ToJobSparkConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigLoggingConfigPtrOutput

type JobSparkConfigOutput

type JobSparkConfigOutput struct{ *pulumi.OutputState }

func (JobSparkConfigOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobSparkConfigOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobSparkConfigOutput) ElementType

func (JobSparkConfigOutput) ElementType() reflect.Type

func (JobSparkConfigOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobSparkConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparkConfigOutput) LoggingConfig

func (JobSparkConfigOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`.

func (JobSparkConfigOutput) MainJarFileUri

func (o JobSparkConfigOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobSparkConfigOutput) Properties

A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparkConfigOutput) ToJobSparkConfigOutput

func (o JobSparkConfigOutput) ToJobSparkConfigOutput() JobSparkConfigOutput

func (JobSparkConfigOutput) ToJobSparkConfigOutputWithContext

func (o JobSparkConfigOutput) ToJobSparkConfigOutputWithContext(ctx context.Context) JobSparkConfigOutput

func (JobSparkConfigOutput) ToJobSparkConfigPtrOutput

func (o JobSparkConfigOutput) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigOutput) ToJobSparkConfigPtrOutputWithContext

func (o JobSparkConfigOutput) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparkConfigPtrInput

type JobSparkConfigPtrInput interface {
	pulumi.Input

	ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput
	ToJobSparkConfigPtrOutputWithContext(context.Context) JobSparkConfigPtrOutput
}

JobSparkConfigPtrInput is an input type that accepts JobSparkConfigArgs, JobSparkConfigPtr and JobSparkConfigPtrOutput values. You can construct a concrete instance of `JobSparkConfigPtrInput` via:

        JobSparkConfigArgs{...}

or:

        nil

type JobSparkConfigPtrOutput

type JobSparkConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparkConfigPtrOutput) ArchiveUris

HCFS URIs of archives to be extracted into the working directory. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (JobSparkConfigPtrOutput) Args

The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (JobSparkConfigPtrOutput) Elem

func (JobSparkConfigPtrOutput) ElementType

func (JobSparkConfigPtrOutput) ElementType() reflect.Type

func (JobSparkConfigPtrOutput) FileUris

HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.

func (JobSparkConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparkConfigPtrOutput) LoggingConfig

func (JobSparkConfigPtrOutput) MainClass

The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jarFileUris`. Conflicts with `mainJarFileUri`.

func (JobSparkConfigPtrOutput) MainJarFileUri

func (o JobSparkConfigPtrOutput) MainJarFileUri() pulumi.StringPtrOutput

The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. Conflicts with `mainClass`

func (JobSparkConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutput

func (o JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutput() JobSparkConfigPtrOutput

func (JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutputWithContext

func (o JobSparkConfigPtrOutput) ToJobSparkConfigPtrOutputWithContext(ctx context.Context) JobSparkConfigPtrOutput

type JobSparksqlConfig

type JobSparksqlConfig struct {
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   []string                        `pulumi:"jarFileUris"`
	LoggingConfig *JobSparksqlConfigLoggingConfig `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`.
	QueryLists []string `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

type JobSparksqlConfigArgs

type JobSparksqlConfigArgs struct {
	// HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris   pulumi.StringArrayInput                `pulumi:"jarFileUris"`
	LoggingConfig JobSparksqlConfigLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	// Conflicts with `queryLists`.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// The list of SQL queries or statements to execute as part of the job.
	// Conflicts with `queryFileUri`.
	QueryLists pulumi.StringArrayInput `pulumi:"queryLists"`
	// Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}
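
As a sketch of how these arguments compose, the program below submits an inline Spark SQL statement to an existing cluster. The cluster name, region, and query are placeholders; the `sparksqlConfig` field names are those documented above.

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Run an inline Spark SQL statement on an existing cluster ("my-cluster" is a placeholder).
		_, err := dataproc.NewJob(ctx, "sparksql", &dataproc.JobArgs{
			Region: pulumi.String("us-central1"),
			Placement: &dataproc.JobPlacementArgs{
				ClusterName: pulumi.String("my-cluster"),
			},
			SparksqlConfig: &dataproc.JobSparksqlConfigArgs{
				QueryLists: pulumi.StringArray{
					pulumi.String("SHOW DATABASES;"),
				},
				// ScriptVariables behaves like `SET name="value";` in Spark SQL.
				ScriptVariables: pulumi.StringMap{
					"env": pulumi.String("dev"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```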

func (JobSparksqlConfigArgs) ElementType

func (JobSparksqlConfigArgs) ElementType() reflect.Type

func (JobSparksqlConfigArgs) ToJobSparksqlConfigOutput

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigOutput() JobSparksqlConfigOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigOutputWithContext

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigOutputWithContext(ctx context.Context) JobSparksqlConfigOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutput

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutputWithContext

func (i JobSparksqlConfigArgs) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobSparksqlConfigInput

type JobSparksqlConfigInput interface {
	pulumi.Input

	ToJobSparksqlConfigOutput() JobSparksqlConfigOutput
	ToJobSparksqlConfigOutputWithContext(context.Context) JobSparksqlConfigOutput
}

JobSparksqlConfigInput is an input type that accepts JobSparksqlConfigArgs and JobSparksqlConfigOutput values. You can construct a concrete instance of `JobSparksqlConfigInput` via:

JobSparksqlConfigArgs{...}

type JobSparksqlConfigLoggingConfig

type JobSparksqlConfigLoggingConfig struct {
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type JobSparksqlConfigLoggingConfigArgs

type JobSparksqlConfigLoggingConfigArgs struct {
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (JobSparksqlConfigLoggingConfigArgs) ElementType

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutput

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutputWithContext

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutput

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (i JobSparksqlConfigLoggingConfigArgs) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigInput

type JobSparksqlConfigLoggingConfigInput interface {
	pulumi.Input

	ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput
	ToJobSparksqlConfigLoggingConfigOutputWithContext(context.Context) JobSparksqlConfigLoggingConfigOutput
}

JobSparksqlConfigLoggingConfigInput is an input type that accepts JobSparksqlConfigLoggingConfigArgs and JobSparksqlConfigLoggingConfigOutput values. You can construct a concrete instance of `JobSparksqlConfigLoggingConfigInput` via:

JobSparksqlConfigLoggingConfigArgs{...}

type JobSparksqlConfigLoggingConfigOutput

type JobSparksqlConfigLoggingConfigOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigLoggingConfigOutput) DriverLogLevels

func (JobSparksqlConfigLoggingConfigOutput) ElementType

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutput

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutput() JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutputWithContext

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutput

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (o JobSparksqlConfigLoggingConfigOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigPtrInput

type JobSparksqlConfigLoggingConfigPtrInput interface {
	pulumi.Input

	ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput
	ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(context.Context) JobSparksqlConfigLoggingConfigPtrOutput
}

JobSparksqlConfigLoggingConfigPtrInput is an input type that accepts JobSparksqlConfigLoggingConfigArgs, JobSparksqlConfigLoggingConfigPtr and JobSparksqlConfigLoggingConfigPtrOutput values. You can construct a concrete instance of `JobSparksqlConfigLoggingConfigPtrInput` via:

        JobSparksqlConfigLoggingConfigArgs{...}

or:

        nil

type JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigLoggingConfigPtrOutput) DriverLogLevels

func (JobSparksqlConfigLoggingConfigPtrOutput) Elem

func (JobSparksqlConfigLoggingConfigPtrOutput) ElementType

func (JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutput

func (o JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutput() JobSparksqlConfigLoggingConfigPtrOutput

func (JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext

func (o JobSparksqlConfigLoggingConfigPtrOutput) ToJobSparksqlConfigLoggingConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigLoggingConfigPtrOutput

type JobSparksqlConfigOutput

type JobSparksqlConfigOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigOutput) ElementType

func (JobSparksqlConfigOutput) ElementType() reflect.Type

func (JobSparksqlConfigOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparksqlConfigOutput) LoggingConfig

func (JobSparksqlConfigOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparksqlConfigOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`.

func (JobSparksqlConfigOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`.

func (JobSparksqlConfigOutput) ScriptVariables

func (o JobSparksqlConfigOutput) ScriptVariables() pulumi.StringMapOutput

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobSparksqlConfigOutput) ToJobSparksqlConfigOutput

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigOutput() JobSparksqlConfigOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigOutputWithContext

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigOutputWithContext(ctx context.Context) JobSparksqlConfigOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutput

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutputWithContext

func (o JobSparksqlConfigOutput) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobSparksqlConfigPtrInput

type JobSparksqlConfigPtrInput interface {
	pulumi.Input

	ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput
	ToJobSparksqlConfigPtrOutputWithContext(context.Context) JobSparksqlConfigPtrOutput
}

JobSparksqlConfigPtrInput is an input type that accepts JobSparksqlConfigArgs, JobSparksqlConfigPtr and JobSparksqlConfigPtrOutput values. You can construct a concrete instance of `JobSparksqlConfigPtrInput` via:

        JobSparksqlConfigArgs{...}

or:

        nil

type JobSparksqlConfigPtrOutput

type JobSparksqlConfigPtrOutput struct{ *pulumi.OutputState }

func (JobSparksqlConfigPtrOutput) Elem

func (JobSparksqlConfigPtrOutput) ElementType

func (JobSparksqlConfigPtrOutput) ElementType() reflect.Type

func (JobSparksqlConfigPtrOutput) JarFileUris

HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (JobSparksqlConfigPtrOutput) LoggingConfig

func (JobSparksqlConfigPtrOutput) Properties

A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.

func (JobSparksqlConfigPtrOutput) QueryFileUri

The HCFS URI of the script that contains SQL queries. Conflicts with `queryLists`.

func (JobSparksqlConfigPtrOutput) QueryLists

The list of SQL queries or statements to execute as part of the job. Conflicts with `queryFileUri`.

func (JobSparksqlConfigPtrOutput) ScriptVariables

Mapping of query variable names to values (equivalent to the Spark SQL command: `SET name="value";`).

func (JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutput

func (o JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutput() JobSparksqlConfigPtrOutput

func (JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutputWithContext

func (o JobSparksqlConfigPtrOutput) ToJobSparksqlConfigPtrOutputWithContext(ctx context.Context) JobSparksqlConfigPtrOutput

type JobState

type JobState struct {
	// If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
	DriverControlsFilesUri pulumi.StringPtrInput
	// A URI pointing to the location of the stdout of the job's driver program.
	DriverOutputResourceUri pulumi.StringPtrInput
	// By default, you can only delete inactive jobs within
	// Dataproc. Setting this to true, and calling destroy, will ensure that the
	// job is first cancelled before issuing the delete.
	ForceDelete pulumi.BoolPtrInput
	// The config of the Hadoop job.
	HadoopConfig JobHadoopConfigPtrInput
	// The config of the Hive job.
	HiveConfig JobHiveConfigPtrInput
	// The list of labels (key/value pairs) to add to the job.
	Labels pulumi.StringMapInput
	// The config of the Pig job.
	PigConfig JobPigConfigPtrInput
	// The config of job placement.
	Placement JobPlacementPtrInput
	// The project in which the `cluster` can be found and jobs
	// subsequently run against. If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The config of pySpark job.
	PysparkConfig JobPysparkConfigPtrInput
	// The reference of the job
	Reference JobReferencePtrInput
	// The Cloud Dataproc region. This essentially determines which clusters are available
	// for this job to be submitted to. If not specified, defaults to `global`.
	Region pulumi.StringPtrInput
	// Optional. Job scheduling configuration.
	Scheduling JobSchedulingPtrInput
	// The config of the Spark job.
	SparkConfig JobSparkConfigPtrInput
	// The config of the Spark SQL job.
	SparksqlConfig JobSparksqlConfigPtrInput
	// The status of the job.
	Statuses JobStatusArrayInput
}

func (JobState) ElementType

func (JobState) ElementType() reflect.Type

type JobStatus

type JobStatus struct {
	Details        *string `pulumi:"details"`
	State          *string `pulumi:"state"`
	StateStartTime *string `pulumi:"stateStartTime"`
	Substate       *string `pulumi:"substate"`
}
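
The statuses reported for a job can be read back from the `Statuses` output of a `Job` resource. A small sketch, assuming a `job` variable returned by `dataproc.NewJob` inside a `pulumi.Run` callback:

```go
// Surface the first reported status entry of the job as stack outputs.
// `job` is assumed to be the *dataproc.Job returned by dataproc.NewJob.
ctx.Export("jobState", job.Statuses.Index(pulumi.Int(0)).State())
ctx.Export("jobSubstate", job.Statuses.Index(pulumi.Int(0)).Substate())
```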

type JobStatusArgs

type JobStatusArgs struct {
	Details        pulumi.StringPtrInput `pulumi:"details"`
	State          pulumi.StringPtrInput `pulumi:"state"`
	StateStartTime pulumi.StringPtrInput `pulumi:"stateStartTime"`
	Substate       pulumi.StringPtrInput `pulumi:"substate"`
}

func (JobStatusArgs) ElementType

func (JobStatusArgs) ElementType() reflect.Type

func (JobStatusArgs) ToJobStatusOutput

func (i JobStatusArgs) ToJobStatusOutput() JobStatusOutput

func (JobStatusArgs) ToJobStatusOutputWithContext

func (i JobStatusArgs) ToJobStatusOutputWithContext(ctx context.Context) JobStatusOutput

type JobStatusArray

type JobStatusArray []JobStatusInput

func (JobStatusArray) ElementType

func (JobStatusArray) ElementType() reflect.Type

func (JobStatusArray) ToJobStatusArrayOutput

func (i JobStatusArray) ToJobStatusArrayOutput() JobStatusArrayOutput

func (JobStatusArray) ToJobStatusArrayOutputWithContext

func (i JobStatusArray) ToJobStatusArrayOutputWithContext(ctx context.Context) JobStatusArrayOutput

type JobStatusArrayInput

type JobStatusArrayInput interface {
	pulumi.Input

	ToJobStatusArrayOutput() JobStatusArrayOutput
	ToJobStatusArrayOutputWithContext(context.Context) JobStatusArrayOutput
}

JobStatusArrayInput is an input type that accepts JobStatusArray and JobStatusArrayOutput values. You can construct a concrete instance of `JobStatusArrayInput` via:

JobStatusArray{ JobStatusArgs{...} }

type JobStatusArrayOutput

type JobStatusArrayOutput struct{ *pulumi.OutputState }

func (JobStatusArrayOutput) ElementType

func (JobStatusArrayOutput) ElementType() reflect.Type

func (JobStatusArrayOutput) Index

func (JobStatusArrayOutput) ToJobStatusArrayOutput

func (o JobStatusArrayOutput) ToJobStatusArrayOutput() JobStatusArrayOutput

func (JobStatusArrayOutput) ToJobStatusArrayOutputWithContext

func (o JobStatusArrayOutput) ToJobStatusArrayOutputWithContext(ctx context.Context) JobStatusArrayOutput

type JobStatusInput

type JobStatusInput interface {
	pulumi.Input

	ToJobStatusOutput() JobStatusOutput
	ToJobStatusOutputWithContext(context.Context) JobStatusOutput
}

JobStatusInput is an input type that accepts JobStatusArgs and JobStatusOutput values. You can construct a concrete instance of `JobStatusInput` via:

JobStatusArgs{...}

type JobStatusOutput

type JobStatusOutput struct{ *pulumi.OutputState }

func (JobStatusOutput) Details

func (JobStatusOutput) ElementType

func (JobStatusOutput) ElementType() reflect.Type

func (JobStatusOutput) State

func (JobStatusOutput) StateStartTime

func (o JobStatusOutput) StateStartTime() pulumi.StringPtrOutput

func (JobStatusOutput) Substate

func (o JobStatusOutput) Substate() pulumi.StringPtrOutput

func (JobStatusOutput) ToJobStatusOutput

func (o JobStatusOutput) ToJobStatusOutput() JobStatusOutput

func (JobStatusOutput) ToJobStatusOutputWithContext

func (o JobStatusOutput) ToJobStatusOutputWithContext(ctx context.Context) JobStatusOutput

type MetastoreService

type MetastoreService struct {
	pulumi.CustomResourceState

	// A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
	ArtifactGcsUri pulumi.StringOutput `pulumi:"artifactGcsUri"`
	// The URI of the endpoint used to access the metastore service.
	EndpointUri pulumi.StringOutput `pulumi:"endpointUri"`
	// Configuration information specific to running Hive metastore software as the metastore service.
	// Structure is documented below.
	HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigPtrOutput `pulumi:"hiveMetastoreConfig"`
	// User-defined labels for the metastore service.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The location where the metastore service should reside.
	// The default value is `global`.
	Location pulumi.StringPtrOutput `pulumi:"location"`
	// The one hour maintenance window of the metastore service.
	// This specifies when the service can be restarted for maintenance purposes in UTC time.
	// Structure is documented below.
	MaintenanceWindow MetastoreServiceMaintenanceWindowPtrOutput `pulumi:"maintenanceWindow"`
	// The relative resource name of the metastore service.
	Name pulumi.StringOutput `pulumi:"name"`
	// The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:
	// "projects/{projectNumber}/global/networks/{network_id}".
	Network pulumi.StringOutput `pulumi:"network"`
	// The TCP port at which the metastore service is reached. Default: 9083.
	Port pulumi.IntOutput `pulumi:"port"`
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringOutput `pulumi:"project"`
	// The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 63 characters.
	ServiceId pulumi.StringOutput `pulumi:"serviceId"`
	// The current state of the metastore service.
	State pulumi.StringOutput `pulumi:"state"`
	// Additional information about the current state of the metastore service, if available.
	StateMessage pulumi.StringOutput `pulumi:"stateMessage"`
	// The tier of the service.
	// Possible values are `DEVELOPER` and `ENTERPRISE`.
	Tier pulumi.StringOutput `pulumi:"tier"`
}

A managed metastore service that serves metadata queries.

## Example Usage ### Dataproc Metastore Service Basic

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewMetastoreService(ctx, "_default", &dataproc.MetastoreServiceArgs{
			ServiceId: pulumi.String("metastore-srv"),
			Location:  pulumi.String("us-central1"),
			Port:      pulumi.Int(9080),
			Tier:      pulumi.String("DEVELOPER"),
			MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
				HourOfDay: pulumi.Int(2),
				DayOfWeek: pulumi.String("SUNDAY"),
			},
			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
				Version: pulumi.String("2.3.6"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

Service can be imported using any of these accepted formats

```sh

$ pulumi import gcp:dataproc/metastoreService:MetastoreService default projects/{{project}}/locations/{{location}}/services/{{service_id}}

```

```sh

$ pulumi import gcp:dataproc/metastoreService:MetastoreService default {{project}}/{{location}}/{{service_id}}

```

```sh

$ pulumi import gcp:dataproc/metastoreService:MetastoreService default {{location}}/{{service_id}}

```

func GetMetastoreService

func GetMetastoreService(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *MetastoreServiceState, opts ...pulumi.ResourceOption) (*MetastoreService, error)

GetMetastoreService gets an existing MetastoreService resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
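
A minimal lookup sketch, inside a `pulumi.Run` callback; the project, location, and service ID in the resource name are placeholders:

```go
svc, err := dataproc.GetMetastoreService(ctx, "existing",
	pulumi.ID("projects/my-project/locations/us-central1/services/metastore-srv"), nil)
if err != nil {
	return err
}
// Re-export the endpoint URI of the looked-up service.
ctx.Export("endpointUri", svc.EndpointUri)
```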

func NewMetastoreService

func NewMetastoreService(ctx *pulumi.Context,
	name string, args *MetastoreServiceArgs, opts ...pulumi.ResourceOption) (*MetastoreService, error)

NewMetastoreService registers a new resource with the given unique name, arguments, and options.

func (*MetastoreService) ElementType

func (*MetastoreService) ElementType() reflect.Type

func (*MetastoreService) ToMetastoreServiceOutput

func (i *MetastoreService) ToMetastoreServiceOutput() MetastoreServiceOutput

func (*MetastoreService) ToMetastoreServiceOutputWithContext

func (i *MetastoreService) ToMetastoreServiceOutputWithContext(ctx context.Context) MetastoreServiceOutput

func (*MetastoreService) ToMetastoreServicePtrOutput

func (i *MetastoreService) ToMetastoreServicePtrOutput() MetastoreServicePtrOutput

func (*MetastoreService) ToMetastoreServicePtrOutputWithContext

func (i *MetastoreService) ToMetastoreServicePtrOutputWithContext(ctx context.Context) MetastoreServicePtrOutput

type MetastoreServiceArgs

type MetastoreServiceArgs struct {
	// Configuration information specific to running Hive metastore software as the metastore service.
	// Structure is documented below.
	HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigPtrInput
	// User-defined labels for the metastore service.
	Labels pulumi.StringMapInput
	// The location where the metastore service should reside.
	// The default value is `global`.
	Location pulumi.StringPtrInput
	// The one hour maintenance window of the metastore service.
	// This specifies when the service can be restarted for maintenance purposes in UTC time.
	// Structure is documented below.
	MaintenanceWindow MetastoreServiceMaintenanceWindowPtrInput
	// The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:
	// "projects/{projectNumber}/global/networks/{network_id}".
	Network pulumi.StringPtrInput
	// The TCP port at which the metastore service is reached. Default: 9083.
	Port pulumi.IntPtrInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 63 characters.
	ServiceId pulumi.StringInput
	// The tier of the service.
	// Possible values are `DEVELOPER` and `ENTERPRISE`.
	Tier pulumi.StringPtrInput
}

The set of arguments for constructing a MetastoreService resource.

func (MetastoreServiceArgs) ElementType

func (MetastoreServiceArgs) ElementType() reflect.Type

type MetastoreServiceArray

type MetastoreServiceArray []MetastoreServiceInput

func (MetastoreServiceArray) ElementType

func (MetastoreServiceArray) ElementType() reflect.Type

func (MetastoreServiceArray) ToMetastoreServiceArrayOutput

func (i MetastoreServiceArray) ToMetastoreServiceArrayOutput() MetastoreServiceArrayOutput

func (MetastoreServiceArray) ToMetastoreServiceArrayOutputWithContext

func (i MetastoreServiceArray) ToMetastoreServiceArrayOutputWithContext(ctx context.Context) MetastoreServiceArrayOutput

type MetastoreServiceArrayInput

type MetastoreServiceArrayInput interface {
	pulumi.Input

	ToMetastoreServiceArrayOutput() MetastoreServiceArrayOutput
	ToMetastoreServiceArrayOutputWithContext(context.Context) MetastoreServiceArrayOutput
}

MetastoreServiceArrayInput is an input type that accepts MetastoreServiceArray and MetastoreServiceArrayOutput values. You can construct a concrete instance of `MetastoreServiceArrayInput` via:

MetastoreServiceArray{ MetastoreServiceArgs{...} }

type MetastoreServiceArrayOutput

type MetastoreServiceArrayOutput struct{ *pulumi.OutputState }

func (MetastoreServiceArrayOutput) ElementType

func (MetastoreServiceArrayOutput) Index

func (MetastoreServiceArrayOutput) ToMetastoreServiceArrayOutput

func (o MetastoreServiceArrayOutput) ToMetastoreServiceArrayOutput() MetastoreServiceArrayOutput

func (MetastoreServiceArrayOutput) ToMetastoreServiceArrayOutputWithContext

func (o MetastoreServiceArrayOutput) ToMetastoreServiceArrayOutputWithContext(ctx context.Context) MetastoreServiceArrayOutput

type MetastoreServiceHiveMetastoreConfig

type MetastoreServiceHiveMetastoreConfig struct {
	// A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml).
	// The mappings override system defaults (some keys cannot be overridden)
	ConfigOverrides map[string]string `pulumi:"configOverrides"`
	// Information used to configure the Hive metastore service as a service principal in a Kerberos realm.
	// Structure is documented below.
	KerberosConfig *MetastoreServiceHiveMetastoreConfigKerberosConfig `pulumi:"kerberosConfig"`
	// The Hive metastore schema version.
	Version string `pulumi:"version"`
}

type MetastoreServiceHiveMetastoreConfigArgs

type MetastoreServiceHiveMetastoreConfigArgs struct {
	// A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml).
	// The mappings override system defaults (some keys cannot be overridden)
	ConfigOverrides pulumi.StringMapInput `pulumi:"configOverrides"`
	// Information used to configure the Hive metastore service as a service principal in a Kerberos realm.
	// Structure is documented below.
	KerberosConfig MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput `pulumi:"kerberosConfig"`
	// The Hive metastore schema version.
	Version pulumi.StringInput `pulumi:"version"`
}
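
For illustration, a fragment of a `MetastoreServiceArgs` literal that pins the Hive schema version and overrides a hive-site.xml property might look as follows; the bucket path is a placeholder:

```go
HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
	Version: pulumi.String("3.1.2"),
	// Override a hive-site.xml setting; some keys cannot be overridden.
	ConfigOverrides: pulumi.StringMap{
		"hive.metastore.warehouse.dir": pulumi.String("gs://my-bucket/hive-warehouse"),
	},
},
```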

func (MetastoreServiceHiveMetastoreConfigArgs) ElementType

func (MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigOutput

func (i MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigOutput() MetastoreServiceHiveMetastoreConfigOutput

func (MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigOutputWithContext

func (i MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigOutput

func (MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigPtrOutput

func (i MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigPtrOutput() MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext

func (i MetastoreServiceHiveMetastoreConfigArgs) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigInput

type MetastoreServiceHiveMetastoreConfigInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigOutput() MetastoreServiceHiveMetastoreConfigOutput
	ToMetastoreServiceHiveMetastoreConfigOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigOutput
}

MetastoreServiceHiveMetastoreConfigInput is an input type that accepts MetastoreServiceHiveMetastoreConfigArgs and MetastoreServiceHiveMetastoreConfigOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigInput` via:

MetastoreServiceHiveMetastoreConfigArgs{...}

type MetastoreServiceHiveMetastoreConfigKerberosConfig

type MetastoreServiceHiveMetastoreConfigKerberosConfig struct {
	// A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC).
	// Structure is documented below.
	Keytab MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab `pulumi:"keytab"`
	// A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
	Krb5ConfigGcsUri string `pulumi:"krb5ConfigGcsUri"`
	// A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
	Principal string `pulumi:"principal"`
}

type MetastoreServiceHiveMetastoreConfigKerberosConfigArgs

type MetastoreServiceHiveMetastoreConfigKerberosConfigArgs struct {
	// A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC).
	// Structure is documented below.
	Keytab MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput `pulumi:"keytab"`
	// A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.
	Krb5ConfigGcsUri pulumi.StringInput `pulumi:"krb5ConfigGcsUri"`
	// A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.
	Principal pulumi.StringInput `pulumi:"principal"`
}
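
As an illustration only, a fragment showing how Kerberos settings slot into `MetastoreServiceHiveMetastoreConfigArgs`; the Secret Manager version, krb5.conf path, and principal are placeholders:

```go
KerberosConfig: &dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigArgs{
	Keytab: &dataproc.MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs{
		// Secret Manager secret version holding the keytab (placeholder).
		CloudSecret: pulumi.String("projects/1234567890/secrets/keytab/versions/1"),
	},
	Krb5ConfigGcsUri: pulumi.String("gs://my-bucket/path/to/krb5.conf"),
	Principal:        pulumi.String("hive/test.example.com@EXAMPLE.COM"),
},
```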

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ElementType

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutput

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigInput

type MetastoreServiceHiveMetastoreConfigKerberosConfigInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigOutput
	ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigOutput
}

MetastoreServiceHiveMetastoreConfigKerberosConfigInput is an input type that accepts MetastoreServiceHiveMetastoreConfigKerberosConfigArgs and MetastoreServiceHiveMetastoreConfigKerberosConfigOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigKerberosConfigInput` via:

MetastoreServiceHiveMetastoreConfigKerberosConfigArgs{...}

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytab struct {
	// The relative resource name of a Secret Manager secret version, in the following form:
	// "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
	CloudSecret string `pulumi:"cloudSecret"`
}

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs struct {
	// The relative resource name of a Secret Manager secret version, in the following form:
	// "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".
	CloudSecret pulumi.StringInput `pulumi:"cloudSecret"`
}

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ElementType

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext

func (i MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput
	ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput
}

MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput is an input type that accepts MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs and MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabInput` via:

MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs{...}

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) CloudSecret

The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ElementType

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrInput

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput
	ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput
}

MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrInput is an input type that accepts MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs, MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtr and MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrInput` via:

        MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabArgs{...}

or:

        nil

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) CloudSecret

The relative resource name of a Secret Manager secret version, in the following form: "projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) Elem

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) ElementType

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigKeytabPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ElementType

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) Keytab

A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) Krb5ConfigGcsUri

A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) Principal

A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput

type MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput() MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput
	ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput
}

MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput is an input type that accepts MetastoreServiceHiveMetastoreConfigKerberosConfigArgs, MetastoreServiceHiveMetastoreConfigKerberosConfigPtr and MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigKerberosConfigPtrInput` via:

        MetastoreServiceHiveMetastoreConfigKerberosConfigArgs{...}

or:

        nil

type MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) Elem

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) ElementType

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) Keytab

A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC). Structure is documented below.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) Krb5ConfigGcsUri

A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) Principal

A Kerberos principal that exists in both the keytab and the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigKerberosConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigOutput

type MetastoreServiceHiveMetastoreConfigOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigOutput) ConfigOverrides

A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)

func (MetastoreServiceHiveMetastoreConfigOutput) ElementType

func (MetastoreServiceHiveMetastoreConfigOutput) KerberosConfig

Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.

func (MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigOutput

func (o MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigOutput() MetastoreServiceHiveMetastoreConfigOutput

func (MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigOutput

func (MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutput

func (o MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutput() MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigOutput) Version

The Hive metastore schema version.

type MetastoreServiceHiveMetastoreConfigPtrInput

type MetastoreServiceHiveMetastoreConfigPtrInput interface {
	pulumi.Input

	ToMetastoreServiceHiveMetastoreConfigPtrOutput() MetastoreServiceHiveMetastoreConfigPtrOutput
	ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext(context.Context) MetastoreServiceHiveMetastoreConfigPtrOutput
}

MetastoreServiceHiveMetastoreConfigPtrInput is an input type that accepts MetastoreServiceHiveMetastoreConfigArgs, MetastoreServiceHiveMetastoreConfigPtr and MetastoreServiceHiveMetastoreConfigPtrOutput values. You can construct a concrete instance of `MetastoreServiceHiveMetastoreConfigPtrInput` via:

        MetastoreServiceHiveMetastoreConfigArgs{...}

or:

        nil

type MetastoreServiceHiveMetastoreConfigPtrOutput

type MetastoreServiceHiveMetastoreConfigPtrOutput struct{ *pulumi.OutputState }

func (MetastoreServiceHiveMetastoreConfigPtrOutput) ConfigOverrides

A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). The mappings override system defaults (some keys cannot be overridden)

func (MetastoreServiceHiveMetastoreConfigPtrOutput) Elem

func (MetastoreServiceHiveMetastoreConfigPtrOutput) ElementType

func (MetastoreServiceHiveMetastoreConfigPtrOutput) KerberosConfig

Information used to configure the Hive metastore service as a service principal in a Kerberos realm. Structure is documented below.

func (MetastoreServiceHiveMetastoreConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutput

func (o MetastoreServiceHiveMetastoreConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutput() MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext

func (o MetastoreServiceHiveMetastoreConfigPtrOutput) ToMetastoreServiceHiveMetastoreConfigPtrOutputWithContext(ctx context.Context) MetastoreServiceHiveMetastoreConfigPtrOutput

func (MetastoreServiceHiveMetastoreConfigPtrOutput) Version

The Hive metastore schema version.

type MetastoreServiceInput

type MetastoreServiceInput interface {
	pulumi.Input

	ToMetastoreServiceOutput() MetastoreServiceOutput
	ToMetastoreServiceOutputWithContext(ctx context.Context) MetastoreServiceOutput
}

type MetastoreServiceMaintenanceWindow

type MetastoreServiceMaintenanceWindow struct {
	// The day of week, when the window starts.
	// Possible values are `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`.
	DayOfWeek string `pulumi:"dayOfWeek"`
	// The hour of day (0-23) when the window starts.
	HourOfDay int `pulumi:"hourOfDay"`
}

type MetastoreServiceMaintenanceWindowArgs

type MetastoreServiceMaintenanceWindowArgs struct {
	// The day of week, when the window starts.
	// Possible values are `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`.
	DayOfWeek pulumi.StringInput `pulumi:"dayOfWeek"`
	// The hour of day (0-23) when the window starts.
	HourOfDay pulumi.IntInput `pulumi:"hourOfDay"`
}

func (MetastoreServiceMaintenanceWindowArgs) ElementType

func (MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowOutput

func (i MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowOutput() MetastoreServiceMaintenanceWindowOutput

func (MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowOutputWithContext

func (i MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowOutput

func (MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowPtrOutput

func (i MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowPtrOutput() MetastoreServiceMaintenanceWindowPtrOutput

func (MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext

func (i MetastoreServiceMaintenanceWindowArgs) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowPtrOutput

type MetastoreServiceMaintenanceWindowInput

type MetastoreServiceMaintenanceWindowInput interface {
	pulumi.Input

	ToMetastoreServiceMaintenanceWindowOutput() MetastoreServiceMaintenanceWindowOutput
	ToMetastoreServiceMaintenanceWindowOutputWithContext(context.Context) MetastoreServiceMaintenanceWindowOutput
}

MetastoreServiceMaintenanceWindowInput is an input type that accepts MetastoreServiceMaintenanceWindowArgs and MetastoreServiceMaintenanceWindowOutput values. You can construct a concrete instance of `MetastoreServiceMaintenanceWindowInput` via:

MetastoreServiceMaintenanceWindowArgs{...}

type MetastoreServiceMaintenanceWindowOutput

type MetastoreServiceMaintenanceWindowOutput struct{ *pulumi.OutputState }

func (MetastoreServiceMaintenanceWindowOutput) DayOfWeek

The day of week, when the window starts. Possible values are `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`.

func (MetastoreServiceMaintenanceWindowOutput) ElementType

func (MetastoreServiceMaintenanceWindowOutput) HourOfDay

The hour of day (0-23) when the window starts.

func (MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowOutput

func (o MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowOutput() MetastoreServiceMaintenanceWindowOutput

func (MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowOutputWithContext

func (o MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowOutput

func (MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowPtrOutput

func (o MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowPtrOutput() MetastoreServiceMaintenanceWindowPtrOutput

func (MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext

func (o MetastoreServiceMaintenanceWindowOutput) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowPtrOutput

type MetastoreServiceMaintenanceWindowPtrInput

type MetastoreServiceMaintenanceWindowPtrInput interface {
	pulumi.Input

	ToMetastoreServiceMaintenanceWindowPtrOutput() MetastoreServiceMaintenanceWindowPtrOutput
	ToMetastoreServiceMaintenanceWindowPtrOutputWithContext(context.Context) MetastoreServiceMaintenanceWindowPtrOutput
}

MetastoreServiceMaintenanceWindowPtrInput is an input type that accepts MetastoreServiceMaintenanceWindowArgs, MetastoreServiceMaintenanceWindowPtr and MetastoreServiceMaintenanceWindowPtrOutput values. You can construct a concrete instance of `MetastoreServiceMaintenanceWindowPtrInput` via:

        MetastoreServiceMaintenanceWindowArgs{...}

or:

        nil

type MetastoreServiceMaintenanceWindowPtrOutput

type MetastoreServiceMaintenanceWindowPtrOutput struct{ *pulumi.OutputState }

func (MetastoreServiceMaintenanceWindowPtrOutput) DayOfWeek

The day of the week when the window starts. Possible values are `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, and `SUNDAY`.

func (MetastoreServiceMaintenanceWindowPtrOutput) Elem

func (MetastoreServiceMaintenanceWindowPtrOutput) ElementType

func (MetastoreServiceMaintenanceWindowPtrOutput) HourOfDay

The hour of day (0-23) when the window starts.

func (MetastoreServiceMaintenanceWindowPtrOutput) ToMetastoreServiceMaintenanceWindowPtrOutput

func (o MetastoreServiceMaintenanceWindowPtrOutput) ToMetastoreServiceMaintenanceWindowPtrOutput() MetastoreServiceMaintenanceWindowPtrOutput

func (MetastoreServiceMaintenanceWindowPtrOutput) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext

func (o MetastoreServiceMaintenanceWindowPtrOutput) ToMetastoreServiceMaintenanceWindowPtrOutputWithContext(ctx context.Context) MetastoreServiceMaintenanceWindowPtrOutput

type MetastoreServiceMap

type MetastoreServiceMap map[string]MetastoreServiceInput

func (MetastoreServiceMap) ElementType

func (MetastoreServiceMap) ElementType() reflect.Type

func (MetastoreServiceMap) ToMetastoreServiceMapOutput

func (i MetastoreServiceMap) ToMetastoreServiceMapOutput() MetastoreServiceMapOutput

func (MetastoreServiceMap) ToMetastoreServiceMapOutputWithContext

func (i MetastoreServiceMap) ToMetastoreServiceMapOutputWithContext(ctx context.Context) MetastoreServiceMapOutput

type MetastoreServiceMapInput

type MetastoreServiceMapInput interface {
	pulumi.Input

	ToMetastoreServiceMapOutput() MetastoreServiceMapOutput
	ToMetastoreServiceMapOutputWithContext(context.Context) MetastoreServiceMapOutput
}

MetastoreServiceMapInput is an input type that accepts MetastoreServiceMap and MetastoreServiceMapOutput values. You can construct a concrete instance of `MetastoreServiceMapInput` via:

MetastoreServiceMap{ "key": MetastoreServiceArgs{...} }

type MetastoreServiceMapOutput

type MetastoreServiceMapOutput struct{ *pulumi.OutputState }

func (MetastoreServiceMapOutput) ElementType

func (MetastoreServiceMapOutput) ElementType() reflect.Type

func (MetastoreServiceMapOutput) MapIndex

func (MetastoreServiceMapOutput) ToMetastoreServiceMapOutput

func (o MetastoreServiceMapOutput) ToMetastoreServiceMapOutput() MetastoreServiceMapOutput

func (MetastoreServiceMapOutput) ToMetastoreServiceMapOutputWithContext

func (o MetastoreServiceMapOutput) ToMetastoreServiceMapOutputWithContext(ctx context.Context) MetastoreServiceMapOutput

type MetastoreServiceOutput

type MetastoreServiceOutput struct{ *pulumi.OutputState }

func (MetastoreServiceOutput) ElementType

func (MetastoreServiceOutput) ElementType() reflect.Type

func (MetastoreServiceOutput) ToMetastoreServiceOutput

func (o MetastoreServiceOutput) ToMetastoreServiceOutput() MetastoreServiceOutput

func (MetastoreServiceOutput) ToMetastoreServiceOutputWithContext

func (o MetastoreServiceOutput) ToMetastoreServiceOutputWithContext(ctx context.Context) MetastoreServiceOutput

func (MetastoreServiceOutput) ToMetastoreServicePtrOutput

func (o MetastoreServiceOutput) ToMetastoreServicePtrOutput() MetastoreServicePtrOutput

func (MetastoreServiceOutput) ToMetastoreServicePtrOutputWithContext

func (o MetastoreServiceOutput) ToMetastoreServicePtrOutputWithContext(ctx context.Context) MetastoreServicePtrOutput

type MetastoreServicePtrInput

type MetastoreServicePtrInput interface {
	pulumi.Input

	ToMetastoreServicePtrOutput() MetastoreServicePtrOutput
	ToMetastoreServicePtrOutputWithContext(ctx context.Context) MetastoreServicePtrOutput
}

type MetastoreServicePtrOutput

type MetastoreServicePtrOutput struct{ *pulumi.OutputState }

func (MetastoreServicePtrOutput) Elem added in v5.21.0

func (MetastoreServicePtrOutput) ElementType

func (MetastoreServicePtrOutput) ElementType() reflect.Type

func (MetastoreServicePtrOutput) ToMetastoreServicePtrOutput

func (o MetastoreServicePtrOutput) ToMetastoreServicePtrOutput() MetastoreServicePtrOutput

func (MetastoreServicePtrOutput) ToMetastoreServicePtrOutputWithContext

func (o MetastoreServicePtrOutput) ToMetastoreServicePtrOutputWithContext(ctx context.Context) MetastoreServicePtrOutput

type MetastoreServiceState

type MetastoreServiceState struct {
	// A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.
	ArtifactGcsUri pulumi.StringPtrInput
	// The URI of the endpoint used to access the metastore service.
	EndpointUri pulumi.StringPtrInput
	// Configuration information specific to running Hive metastore software as the metastore service.
	// Structure is documented below.
	HiveMetastoreConfig MetastoreServiceHiveMetastoreConfigPtrInput
	// User-defined labels for the metastore service.
	Labels pulumi.StringMapInput
	// The location where the metastore service should reside.
	// The default value is `global`.
	Location pulumi.StringPtrInput
	// The one hour maintenance window of the metastore service.
	// This specifies when the service can be restarted for maintenance purposes in UTC time.
	// Structure is documented below.
	MaintenanceWindow MetastoreServiceMaintenanceWindowPtrInput
	// The relative resource name of the metastore service.
	Name pulumi.StringPtrInput
	// The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form:
	// "projects/{projectNumber}/global/networks/{network_id}".
	Network pulumi.StringPtrInput
	// The TCP port at which the metastore service is reached. Default: 9083.
	Port pulumi.IntPtrInput
	// The ID of the project in which the resource belongs.
	// If it is not provided, the provider project is used.
	Project pulumi.StringPtrInput
	// The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_),
	// and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between
	// 3 and 63 characters.
	ServiceId pulumi.StringPtrInput
	// The current state of the metastore service.
	State pulumi.StringPtrInput
	// Additional information about the current state of the metastore service, if available.
	StateMessage pulumi.StringPtrInput
	// The tier of the service.
	// Possible values are `DEVELOPER` and `ENTERPRISE`.
	Tier pulumi.StringPtrInput
}

func (MetastoreServiceState) ElementType

func (MetastoreServiceState) ElementType() reflect.Type
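
`MetastoreServiceState` seeds state when looking up an existing service with the package's `GetMetastoreService` function, which follows the same pattern as `GetAutoscalingPolicy` and `GetWorkflowTemplate`. A minimal sketch, run inside a `pulumi.Run` callback, with a placeholder resource ID; passing `nil` state is typical:

```go
// Adopt an existing Dataproc Metastore service by ID and export its endpoint.
svc, err := dataproc.GetMetastoreService(ctx, "existing",
	pulumi.ID("projects/my-project/locations/us-central1/services/my-metastore"),
	nil, // or a *dataproc.MetastoreServiceState with known values
)
if err != nil {
	return err
}
ctx.Export("metastoreEndpoint", svc.EndpointUri)
```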

type WorkflowTemplate added in v5.2.0

type WorkflowTemplate struct {
	pulumi.CustomResourceState

	// Output only. The time template was created.
	CreateTime pulumi.StringOutput `pulumi:"createTime"`
	// (Beta only) Optional. Timeout duration for the DAG of jobs. You can use "s", "m", "h", and "d" suffixes for second, minute, hour, and day duration values, respectively. The timeout duration must be from 10 minutes ("10m") to 24 hours ("24h" or "1d"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster (/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.
	DagTimeout pulumi.StringPtrOutput `pulumi:"dagTimeout"`
	// Required. The Directed Acyclic Graph of Jobs to submit.
	Jobs WorkflowTemplateJobArrayOutput `pulumi:"jobs"`
	// Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long. No more than 32 labels can be associated with a given cluster.
	Labels pulumi.StringMapOutput `pulumi:"labels"`
	// The location for the resource
	Location pulumi.StringOutput `pulumi:"location"`
	// The name of the workflow template.
	Name pulumi.StringOutput `pulumi:"name"`
	// Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
	Parameters WorkflowTemplateParameterArrayOutput `pulumi:"parameters"`
	// Required. WorkflowTemplate scheduling information.
	Placement WorkflowTemplatePlacementOutput `pulumi:"placement"`
	// The project for the resource
	Project pulumi.StringOutput `pulumi:"project"`
	// Output only. The time template was last updated.
	UpdateTime pulumi.StringOutput `pulumi:"updateTime"`
	// Optional. Used to perform a consistent read-modify-write. This field should be left blank for a `CreateWorkflowTemplate` request. It is required for an `UpdateWorkflowTemplate` request, and must match the current server version. A typical update template flow would fetch the current template with a `GetWorkflowTemplate` request, which will return the current template with the `version` field filled in with the current server version. The user updates other fields in the template, then returns it as part of the `UpdateWorkflowTemplate` request.
	//
	// Deprecated: version is not useful as a configurable field, and will be removed in the future.
	Version pulumi.IntOutput `pulumi:"version"`
}

A Workflow Template is a reusable workflow configuration. It defines a graph of jobs with information on where to run those jobs.

## Example Usage

```go package main

import (

"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"

)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewWorkflowTemplate(ctx, "template", &dataproc.WorkflowTemplateArgs{
			Jobs: dataproc.WorkflowTemplateJobArray{
				&dataproc.WorkflowTemplateJobArgs{
					SparkJob: &dataproc.WorkflowTemplateJobSparkJobArgs{
						MainClass: pulumi.String("SomeClass"),
					},
					StepId: pulumi.String("someJob"),
				},
				&dataproc.WorkflowTemplateJobArgs{
					PrerequisiteStepIds: pulumi.StringArray{
						pulumi.String("someJob"),
					},
					PrestoJob: &dataproc.WorkflowTemplateJobPrestoJobArgs{
						QueryFileUri: pulumi.String("someuri"),
					},
					StepId: pulumi.String("otherJob"),
				},
			},
			Location: pulumi.String("us-central1"),
			Placement: &dataproc.WorkflowTemplatePlacementArgs{
				ManagedCluster: &dataproc.WorkflowTemplatePlacementManagedClusterArgs{
					ClusterName: pulumi.String("my-cluster"),
					Config: &dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{
						GceClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
							Tags: pulumi.StringArray{
								pulumi.String("foo"),
								pulumi.String("bar"),
							},
							Zone: pulumi.String("us-central1-a"),
						},
						MasterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
							DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{
								BootDiskSizeGb: pulumi.Int(15),
								BootDiskType:   pulumi.String("pd-ssd"),
							},
							MachineType:  pulumi.String("n1-standard-1"),
							NumInstances: pulumi.Int(1),
						},
						SecondaryWorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs{
							NumInstances: pulumi.Int(2),
						},
						SoftwareConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs{
							ImageVersion: pulumi.String("1.3.7-deb9"),
						},
						WorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{
							DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs{
								BootDiskSizeGb: pulumi.Int(10),
								NumLocalSsds:   pulumi.Int(2),
							},
							MachineType:  pulumi.String("n1-standard-2"),
							NumInstances: pulumi.Int(3),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

```

## Import

WorkflowTemplate can be imported using any of these accepted formats

```sh

$ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}

```

```sh

$ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default {{project}}/{{location}}/{{name}}

```

```sh

$ pulumi import gcp:dataproc/workflowTemplate:WorkflowTemplate default {{location}}/{{name}}

```

func GetWorkflowTemplate added in v5.2.0

func GetWorkflowTemplate(ctx *pulumi.Context,
	name string, id pulumi.IDInput, state *WorkflowTemplateState, opts ...pulumi.ResourceOption) (*WorkflowTemplate, error)

GetWorkflowTemplate gets an existing WorkflowTemplate resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
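
A minimal sketch of the lookup inside a `pulumi.Run` callback; the resource ID below is a placeholder in one of the import formats listed above:

```go
// Read an existing workflow template without managing its configuration here.
tmpl, err := dataproc.GetWorkflowTemplate(ctx, "existing-template",
	pulumi.ID("projects/my-project/locations/us-central1/workflowTemplates/my-template"),
	nil, // optional *dataproc.WorkflowTemplateState to seed known state
)
if err != nil {
	return err
}
ctx.Export("templateCreateTime", tmpl.CreateTime)
```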

func NewWorkflowTemplate added in v5.2.0

func NewWorkflowTemplate(ctx *pulumi.Context,
	name string, args *WorkflowTemplateArgs, opts ...pulumi.ResourceOption) (*WorkflowTemplate, error)

NewWorkflowTemplate registers a new resource with the given unique name, arguments, and options.

func (*WorkflowTemplate) ElementType added in v5.2.0

func (*WorkflowTemplate) ElementType() reflect.Type

func (*WorkflowTemplate) ToWorkflowTemplateOutput added in v5.2.0

func (i *WorkflowTemplate) ToWorkflowTemplateOutput() WorkflowTemplateOutput

func (*WorkflowTemplate) ToWorkflowTemplateOutputWithContext added in v5.2.0

func (i *WorkflowTemplate) ToWorkflowTemplateOutputWithContext(ctx context.Context) WorkflowTemplateOutput

func (*WorkflowTemplate) ToWorkflowTemplatePtrOutput added in v5.2.0

func (i *WorkflowTemplate) ToWorkflowTemplatePtrOutput() WorkflowTemplatePtrOutput

func (*WorkflowTemplate) ToWorkflowTemplatePtrOutputWithContext added in v5.2.0

func (i *WorkflowTemplate) ToWorkflowTemplatePtrOutputWithContext(ctx context.Context) WorkflowTemplatePtrOutput

type WorkflowTemplateArgs added in v5.2.0

type WorkflowTemplateArgs struct {
	// (Beta only) Optional. Timeout duration for the DAG of jobs. You can use "s", "m", "h", and "d" suffixes for second, minute, hour, and day duration values, respectively. The timeout duration must be from 10 minutes ("10m") to 24 hours ("24h" or "1d"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster (/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.
	DagTimeout pulumi.StringPtrInput
	// Required. The Directed Acyclic Graph of Jobs to submit.
	Jobs WorkflowTemplateJobArrayInput
	// Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long. No more than 32 labels can be associated with a given cluster.
	Labels pulumi.StringMapInput
	// The location for the resource
	Location pulumi.StringInput
	// The name of the workflow template.
	Name pulumi.StringPtrInput
	// Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
	Parameters WorkflowTemplateParameterArrayInput
	// Required. WorkflowTemplate scheduling information.
	Placement WorkflowTemplatePlacementInput
	// The project for the resource
	Project pulumi.StringPtrInput
	// Optional. Used to perform a consistent read-modify-write. This field should be left blank for a `CreateWorkflowTemplate` request. It is required for an `UpdateWorkflowTemplate` request, and must match the current server version. A typical update template flow would fetch the current template with a `GetWorkflowTemplate` request, which will return the current template with the `version` field filled in with the current server version. The user updates other fields in the template, then returns it as part of the `UpdateWorkflowTemplate` request.
	//
	// Deprecated: version is not useful as a configurable field, and will be removed in the future.
	Version pulumi.IntPtrInput
}

The set of arguments for constructing a WorkflowTemplate resource.

func (WorkflowTemplateArgs) ElementType added in v5.2.0

func (WorkflowTemplateArgs) ElementType() reflect.Type

type WorkflowTemplateArray added in v5.2.0

type WorkflowTemplateArray []WorkflowTemplateInput

func (WorkflowTemplateArray) ElementType added in v5.2.0

func (WorkflowTemplateArray) ElementType() reflect.Type

func (WorkflowTemplateArray) ToWorkflowTemplateArrayOutput added in v5.2.0

func (i WorkflowTemplateArray) ToWorkflowTemplateArrayOutput() WorkflowTemplateArrayOutput

func (WorkflowTemplateArray) ToWorkflowTemplateArrayOutputWithContext added in v5.2.0

func (i WorkflowTemplateArray) ToWorkflowTemplateArrayOutputWithContext(ctx context.Context) WorkflowTemplateArrayOutput

type WorkflowTemplateArrayInput added in v5.2.0

type WorkflowTemplateArrayInput interface {
	pulumi.Input

	ToWorkflowTemplateArrayOutput() WorkflowTemplateArrayOutput
	ToWorkflowTemplateArrayOutputWithContext(context.Context) WorkflowTemplateArrayOutput
}

WorkflowTemplateArrayInput is an input type that accepts WorkflowTemplateArray and WorkflowTemplateArrayOutput values. You can construct a concrete instance of `WorkflowTemplateArrayInput` via:

WorkflowTemplateArray{ WorkflowTemplateArgs{...} }

type WorkflowTemplateArrayOutput added in v5.2.0

type WorkflowTemplateArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplateArrayOutput) Index added in v5.2.0

func (WorkflowTemplateArrayOutput) ToWorkflowTemplateArrayOutput added in v5.2.0

func (o WorkflowTemplateArrayOutput) ToWorkflowTemplateArrayOutput() WorkflowTemplateArrayOutput

func (WorkflowTemplateArrayOutput) ToWorkflowTemplateArrayOutputWithContext added in v5.2.0

func (o WorkflowTemplateArrayOutput) ToWorkflowTemplateArrayOutputWithContext(ctx context.Context) WorkflowTemplateArrayOutput

type WorkflowTemplateInput added in v5.2.0

type WorkflowTemplateInput interface {
	pulumi.Input

	ToWorkflowTemplateOutput() WorkflowTemplateOutput
	ToWorkflowTemplateOutputWithContext(ctx context.Context) WorkflowTemplateOutput
}

type WorkflowTemplateJob added in v5.2.0

type WorkflowTemplateJob struct {
	// Optional. Job is a Hadoop job.
	HadoopJob *WorkflowTemplateJobHadoopJob `pulumi:"hadoopJob"`
	// Optional. Job is a Hive job.
	HiveJob *WorkflowTemplateJobHiveJob `pulumi:"hiveJob"`
	// Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long. No more than 32 labels can be associated with a given cluster.
	Labels map[string]string `pulumi:"labels"`
	// Optional. Job is a Pig job.
	PigJob *WorkflowTemplateJobPigJob `pulumi:"pigJob"`
	// Optional. The list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
	PrerequisiteStepIds []string `pulumi:"prerequisiteStepIds"`
	// Optional. Job is a Presto job.
	PrestoJob *WorkflowTemplateJobPrestoJob `pulumi:"prestoJob"`
	// Optional. Job is a PySpark job.
	PysparkJob *WorkflowTemplateJobPysparkJob `pulumi:"pysparkJob"`
	// Optional. Job scheduling configuration.
	Scheduling *WorkflowTemplateJobScheduling `pulumi:"scheduling"`
	// Optional. Job is a Spark job.
	SparkJob *WorkflowTemplateJobSparkJob `pulumi:"sparkJob"`
	// Optional. Job is a SparkR job.
	SparkRJob *WorkflowTemplateJobSparkRJob `pulumi:"sparkRJob"`
	// Optional. Job is a SparkSql job.
	SparkSqlJob *WorkflowTemplateJobSparkSqlJob `pulumi:"sparkSqlJob"`
	// Required. The step id. The id must be unique among all jobs within the template. The step id is used as a prefix for the job id, as the job's `goog-dataproc-workflow-step-id` label, and in the `prerequisiteStepIds` field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	StepId string `pulumi:"stepId"`
}

type WorkflowTemplateJobArgs added in v5.2.0

type WorkflowTemplateJobArgs struct {
	// Optional. Job is a Hadoop job.
	HadoopJob WorkflowTemplateJobHadoopJobPtrInput `pulumi:"hadoopJob"`
	// Optional. Job is a Hive job.
	HiveJob WorkflowTemplateJobHiveJobPtrInput `pulumi:"hiveJob"`
	// Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long. No more than 32 labels can be associated with a given cluster.
	Labels pulumi.StringMapInput `pulumi:"labels"`
	// Optional. Job is a Pig job.
	PigJob WorkflowTemplateJobPigJobPtrInput `pulumi:"pigJob"`
	// Optional. The list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.
	PrerequisiteStepIds pulumi.StringArrayInput `pulumi:"prerequisiteStepIds"`
	// Optional. Job is a Presto job.
	PrestoJob WorkflowTemplateJobPrestoJobPtrInput `pulumi:"prestoJob"`
	// Optional. Job is a PySpark job.
	PysparkJob WorkflowTemplateJobPysparkJobPtrInput `pulumi:"pysparkJob"`
	// Optional. Job scheduling configuration.
	Scheduling WorkflowTemplateJobSchedulingPtrInput `pulumi:"scheduling"`
	// Optional. Job is a Spark job.
	SparkJob WorkflowTemplateJobSparkJobPtrInput `pulumi:"sparkJob"`
	// Optional. Job is a SparkR job.
	SparkRJob WorkflowTemplateJobSparkRJobPtrInput `pulumi:"sparkRJob"`
	// Optional. Job is a SparkSql job.
	SparkSqlJob WorkflowTemplateJobSparkSqlJobPtrInput `pulumi:"sparkSqlJob"`
	// Required. The step id. The id must be unique among all jobs within the template. The step id is used as a prefix for the job id, as the job's `goog-dataproc-workflow-step-id` label, and in the `prerequisiteStepIds` field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
	StepId pulumi.StringInput `pulumi:"stepId"`
}

func (WorkflowTemplateJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobArgs) ElementType() reflect.Type

func (WorkflowTemplateJobArgs) ToWorkflowTemplateJobOutput added in v5.2.0

func (i WorkflowTemplateJobArgs) ToWorkflowTemplateJobOutput() WorkflowTemplateJobOutput

func (WorkflowTemplateJobArgs) ToWorkflowTemplateJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobArgs) ToWorkflowTemplateJobOutputWithContext(ctx context.Context) WorkflowTemplateJobOutput

type WorkflowTemplateJobArray added in v5.2.0

type WorkflowTemplateJobArray []WorkflowTemplateJobInput

func (WorkflowTemplateJobArray) ElementType added in v5.2.0

func (WorkflowTemplateJobArray) ElementType() reflect.Type

func (WorkflowTemplateJobArray) ToWorkflowTemplateJobArrayOutput added in v5.2.0

func (i WorkflowTemplateJobArray) ToWorkflowTemplateJobArrayOutput() WorkflowTemplateJobArrayOutput

func (WorkflowTemplateJobArray) ToWorkflowTemplateJobArrayOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobArray) ToWorkflowTemplateJobArrayOutputWithContext(ctx context.Context) WorkflowTemplateJobArrayOutput

type WorkflowTemplateJobArrayInput added in v5.2.0

type WorkflowTemplateJobArrayInput interface {
	pulumi.Input

	ToWorkflowTemplateJobArrayOutput() WorkflowTemplateJobArrayOutput
	ToWorkflowTemplateJobArrayOutputWithContext(context.Context) WorkflowTemplateJobArrayOutput
}

WorkflowTemplateJobArrayInput is an input type that accepts WorkflowTemplateJobArray and WorkflowTemplateJobArrayOutput values. You can construct a concrete instance of `WorkflowTemplateJobArrayInput` via:

WorkflowTemplateJobArray{ WorkflowTemplateJobArgs{...} }

type WorkflowTemplateJobArrayOutput added in v5.2.0

type WorkflowTemplateJobArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobArrayOutput) Index added in v5.2.0

func (WorkflowTemplateJobArrayOutput) ToWorkflowTemplateJobArrayOutput added in v5.2.0

func (o WorkflowTemplateJobArrayOutput) ToWorkflowTemplateJobArrayOutput() WorkflowTemplateJobArrayOutput

func (WorkflowTemplateJobArrayOutput) ToWorkflowTemplateJobArrayOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobArrayOutput) ToWorkflowTemplateJobArrayOutputWithContext(ctx context.Context) WorkflowTemplateJobArrayOutput

type WorkflowTemplateJobHadoopJob added in v5.2.0

type WorkflowTemplateJobHadoopJob struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *WorkflowTemplateJobHadoopJobLoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`.
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
}

type WorkflowTemplateJobHadoopJobArgs added in v5.2.0

type WorkflowTemplateJobHadoopJobArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig WorkflowTemplateJobHadoopJobLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`.
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

func (WorkflowTemplateJobHadoopJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobHadoopJobArgs) ToWorkflowTemplateJobHadoopJobOutput added in v5.2.0

func (i WorkflowTemplateJobHadoopJobArgs) ToWorkflowTemplateJobHadoopJobOutput() WorkflowTemplateJobHadoopJobOutput

func (WorkflowTemplateJobHadoopJobArgs) ToWorkflowTemplateJobHadoopJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobHadoopJobArgs) ToWorkflowTemplateJobHadoopJobOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobOutput

func (WorkflowTemplateJobHadoopJobArgs) ToWorkflowTemplateJobHadoopJobPtrOutput added in v5.2.0

func (i WorkflowTemplateJobHadoopJobArgs) ToWorkflowTemplateJobHadoopJobPtrOutput() WorkflowTemplateJobHadoopJobPtrOutput

func (WorkflowTemplateJobHadoopJobArgs) ToWorkflowTemplateJobHadoopJobPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobHadoopJobArgs) ToWorkflowTemplateJobHadoopJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobPtrOutput

type WorkflowTemplateJobHadoopJobInput added in v5.2.0

type WorkflowTemplateJobHadoopJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobHadoopJobOutput() WorkflowTemplateJobHadoopJobOutput
	ToWorkflowTemplateJobHadoopJobOutputWithContext(context.Context) WorkflowTemplateJobHadoopJobOutput
}

WorkflowTemplateJobHadoopJobInput is an input type that accepts WorkflowTemplateJobHadoopJobArgs and WorkflowTemplateJobHadoopJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobHadoopJobInput` via:

WorkflowTemplateJobHadoopJobArgs{...}
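
For example (a sketch with placeholder bucket paths), a Hadoop step inside a template's `Jobs` array might look like this:

```go
hadoopStep := &dataproc.WorkflowTemplateJobArgs{
	StepId: pulumi.String("hadoop-step"),
	HadoopJob: &dataproc.WorkflowTemplateJobHadoopJobArgs{
		MainJarFileUri: pulumi.String("gs://my-bucket/jars/wordcount.jar"),
		Args: pulumi.StringArray{
			pulumi.String("gs://my-bucket/input/"),
			pulumi.String("gs://my-bucket/output/"),
		},
	},
}
_ = hadoopStep // append to dataproc.WorkflowTemplateJobArray{...} in WorkflowTemplateArgs
```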

type WorkflowTemplateJobHadoopJobLoggingConfig added in v5.2.0

type WorkflowTemplateJobHadoopJobLoggingConfig struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type WorkflowTemplateJobHadoopJobLoggingConfigArgs added in v5.2.0

type WorkflowTemplateJobHadoopJobLoggingConfigArgs struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (WorkflowTemplateJobHadoopJobLoggingConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobHadoopJobLoggingConfigArgs) ToWorkflowTemplateJobHadoopJobLoggingConfigOutput added in v5.2.0

func (i WorkflowTemplateJobHadoopJobLoggingConfigArgs) ToWorkflowTemplateJobHadoopJobLoggingConfigOutput() WorkflowTemplateJobHadoopJobLoggingConfigOutput

func (WorkflowTemplateJobHadoopJobLoggingConfigArgs) ToWorkflowTemplateJobHadoopJobLoggingConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobHadoopJobLoggingConfigArgs) ToWorkflowTemplateJobHadoopJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobLoggingConfigOutput

func (WorkflowTemplateJobHadoopJobLoggingConfigArgs) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutput added in v5.2.0

func (i WorkflowTemplateJobHadoopJobLoggingConfigArgs) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutput() WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput

func (WorkflowTemplateJobHadoopJobLoggingConfigArgs) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobHadoopJobLoggingConfigArgs) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput

type WorkflowTemplateJobHadoopJobLoggingConfigInput added in v5.2.0

type WorkflowTemplateJobHadoopJobLoggingConfigInput interface {
	pulumi.Input

	ToWorkflowTemplateJobHadoopJobLoggingConfigOutput() WorkflowTemplateJobHadoopJobLoggingConfigOutput
	ToWorkflowTemplateJobHadoopJobLoggingConfigOutputWithContext(context.Context) WorkflowTemplateJobHadoopJobLoggingConfigOutput
}

WorkflowTemplateJobHadoopJobLoggingConfigInput is an input type that accepts WorkflowTemplateJobHadoopJobLoggingConfigArgs and WorkflowTemplateJobHadoopJobLoggingConfigOutput values. You can construct a concrete instance of `WorkflowTemplateJobHadoopJobLoggingConfigInput` via:

WorkflowTemplateJobHadoopJobLoggingConfigArgs{...}
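
For example (illustrative package names and levels), driver log levels for a Hadoop step can be set with a string map:

```go
logging := &dataproc.WorkflowTemplateJobHadoopJobLoggingConfigArgs{
	DriverLogLevels: pulumi.StringMap{
		"root":       pulumi.String("INFO"),
		"org.apache": pulumi.String("DEBUG"),
	},
}
_ = logging // set as LoggingConfig on WorkflowTemplateJobHadoopJobArgs
```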

type WorkflowTemplateJobHadoopJobLoggingConfigOutput added in v5.2.0

type WorkflowTemplateJobHadoopJobLoggingConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobHadoopJobLoggingConfigOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobHadoopJobLoggingConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobHadoopJobLoggingConfigOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigOutput added in v5.2.0

func (o WorkflowTemplateJobHadoopJobLoggingConfigOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigOutput() WorkflowTemplateJobHadoopJobLoggingConfigOutput

func (WorkflowTemplateJobHadoopJobLoggingConfigOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHadoopJobLoggingConfigOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobLoggingConfigOutput

func (WorkflowTemplateJobHadoopJobLoggingConfigOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobHadoopJobLoggingConfigOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutput() WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput

func (WorkflowTemplateJobHadoopJobLoggingConfigOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHadoopJobLoggingConfigOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput

type WorkflowTemplateJobHadoopJobLoggingConfigPtrInput added in v5.2.0

type WorkflowTemplateJobHadoopJobLoggingConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutput() WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput
	ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutputWithContext(context.Context) WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput
}

WorkflowTemplateJobHadoopJobLoggingConfigPtrInput is an input type that accepts WorkflowTemplateJobHadoopJobLoggingConfigArgs, WorkflowTemplateJobHadoopJobLoggingConfigPtr and WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobHadoopJobLoggingConfigPtrInput` via:

        WorkflowTemplateJobHadoopJobLoggingConfigArgs{...}

or:

        nil

type WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput added in v5.2.0

type WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutput() WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput

func (WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput) ToWorkflowTemplateJobHadoopJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobLoggingConfigPtrOutput

type WorkflowTemplateJobHadoopJobOutput added in v5.2.0

type WorkflowTemplateJobHadoopJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobHadoopJobOutput) ArchiveUris added in v5.2.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (WorkflowTemplateJobHadoopJobOutput) Args added in v5.2.0

Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (WorkflowTemplateJobHadoopJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobHadoopJobOutput) FileUris added in v5.2.0

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (WorkflowTemplateJobHadoopJobOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobHadoopJobOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobHadoopJobOutput) MainClass added in v5.2.0

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`.

func (WorkflowTemplateJobHadoopJobOutput) MainJarFileUri added in v5.2.0

The HCFS URI of the jar file that contains the main class.

func (WorkflowTemplateJobHadoopJobOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobHadoopJobOutput) ToWorkflowTemplateJobHadoopJobOutput added in v5.2.0

func (o WorkflowTemplateJobHadoopJobOutput) ToWorkflowTemplateJobHadoopJobOutput() WorkflowTemplateJobHadoopJobOutput

func (WorkflowTemplateJobHadoopJobOutput) ToWorkflowTemplateJobHadoopJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHadoopJobOutput) ToWorkflowTemplateJobHadoopJobOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobOutput

func (WorkflowTemplateJobHadoopJobOutput) ToWorkflowTemplateJobHadoopJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobHadoopJobOutput) ToWorkflowTemplateJobHadoopJobPtrOutput() WorkflowTemplateJobHadoopJobPtrOutput

func (WorkflowTemplateJobHadoopJobOutput) ToWorkflowTemplateJobHadoopJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHadoopJobOutput) ToWorkflowTemplateJobHadoopJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobPtrOutput

type WorkflowTemplateJobHadoopJobPtrInput added in v5.2.0

type WorkflowTemplateJobHadoopJobPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobHadoopJobPtrOutput() WorkflowTemplateJobHadoopJobPtrOutput
	ToWorkflowTemplateJobHadoopJobPtrOutputWithContext(context.Context) WorkflowTemplateJobHadoopJobPtrOutput
}

WorkflowTemplateJobHadoopJobPtrInput is an input type that accepts WorkflowTemplateJobHadoopJobArgs, WorkflowTemplateJobHadoopJobPtr and WorkflowTemplateJobHadoopJobPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobHadoopJobPtrInput` via:

        WorkflowTemplateJobHadoopJobArgs{...}

or:

        nil

type WorkflowTemplateJobHadoopJobPtrOutput added in v5.2.0

type WorkflowTemplateJobHadoopJobPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobHadoopJobPtrOutput) ArchiveUris added in v5.2.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (WorkflowTemplateJobHadoopJobPtrOutput) Args added in v5.2.0

Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (WorkflowTemplateJobHadoopJobPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobHadoopJobPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobHadoopJobPtrOutput) FileUris added in v5.2.0

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (WorkflowTemplateJobHadoopJobPtrOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobHadoopJobPtrOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobHadoopJobPtrOutput) MainClass added in v5.2.0

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`.

func (WorkflowTemplateJobHadoopJobPtrOutput) MainJarFileUri added in v5.2.0

The HCFS URI of the jar file that contains the main class.

func (WorkflowTemplateJobHadoopJobPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobHadoopJobPtrOutput) ToWorkflowTemplateJobHadoopJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobHadoopJobPtrOutput) ToWorkflowTemplateJobHadoopJobPtrOutput() WorkflowTemplateJobHadoopJobPtrOutput

func (WorkflowTemplateJobHadoopJobPtrOutput) ToWorkflowTemplateJobHadoopJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHadoopJobPtrOutput) ToWorkflowTemplateJobHadoopJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHadoopJobPtrOutput

type WorkflowTemplateJobHiveJob added in v5.2.0

type WorkflowTemplateJobHiveJob struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *WorkflowTemplateJobHiveJobQueryList `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Hive command: SET `name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

type WorkflowTemplateJobHiveJobArgs added in v5.2.0

type WorkflowTemplateJobHiveJobArgs struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList WorkflowTemplateJobHiveJobQueryListPtrInput `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Hive command: SET `name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

func (WorkflowTemplateJobHiveJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobHiveJobArgs) ToWorkflowTemplateJobHiveJobOutput added in v5.2.0

func (i WorkflowTemplateJobHiveJobArgs) ToWorkflowTemplateJobHiveJobOutput() WorkflowTemplateJobHiveJobOutput

func (WorkflowTemplateJobHiveJobArgs) ToWorkflowTemplateJobHiveJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobHiveJobArgs) ToWorkflowTemplateJobHiveJobOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobOutput

func (WorkflowTemplateJobHiveJobArgs) ToWorkflowTemplateJobHiveJobPtrOutput added in v5.2.0

func (i WorkflowTemplateJobHiveJobArgs) ToWorkflowTemplateJobHiveJobPtrOutput() WorkflowTemplateJobHiveJobPtrOutput

func (WorkflowTemplateJobHiveJobArgs) ToWorkflowTemplateJobHiveJobPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobHiveJobArgs) ToWorkflowTemplateJobHiveJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobPtrOutput

type WorkflowTemplateJobHiveJobInput added in v5.2.0

type WorkflowTemplateJobHiveJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobHiveJobOutput() WorkflowTemplateJobHiveJobOutput
	ToWorkflowTemplateJobHiveJobOutputWithContext(context.Context) WorkflowTemplateJobHiveJobOutput
}

WorkflowTemplateJobHiveJobInput is an input type that accepts WorkflowTemplateJobHiveJobArgs and WorkflowTemplateJobHiveJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobHiveJobInput` via:

WorkflowTemplateJobHiveJobArgs{...}
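
A sketch of a Hive step configuration (the query file URI and script variable are placeholders):

```go
hiveJob := &dataproc.WorkflowTemplateJobHiveJobArgs{
	QueryFileUri:      pulumi.String("gs://my-bucket/queries/daily.hql"),
	ContinueOnFailure: pulumi.Bool(true),
	ScriptVariables: pulumi.StringMap{
		"run_date": pulumi.String("2021-11-01"),
	},
}
_ = hiveJob // set as HiveJob on a WorkflowTemplateJobArgs step
```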

type WorkflowTemplateJobHiveJobOutput added in v5.2.0

type WorkflowTemplateJobHiveJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobHiveJobOutput) ContinueOnFailure added in v5.2.0

Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.

func (WorkflowTemplateJobHiveJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobHiveJobOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobHiveJobOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobHiveJobOutput) QueryFileUri added in v5.2.0

The HCFS URI of the script that contains SQL queries.

func (WorkflowTemplateJobHiveJobOutput) QueryList added in v5.2.0

A list of queries.

func (WorkflowTemplateJobHiveJobOutput) ScriptVariables added in v5.2.0

Optional. Mapping of query variable names to values (equivalent to the Hive command: SET `name="value";`).

func (WorkflowTemplateJobHiveJobOutput) ToWorkflowTemplateJobHiveJobOutput added in v5.2.0

func (o WorkflowTemplateJobHiveJobOutput) ToWorkflowTemplateJobHiveJobOutput() WorkflowTemplateJobHiveJobOutput

func (WorkflowTemplateJobHiveJobOutput) ToWorkflowTemplateJobHiveJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHiveJobOutput) ToWorkflowTemplateJobHiveJobOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobOutput

func (WorkflowTemplateJobHiveJobOutput) ToWorkflowTemplateJobHiveJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobHiveJobOutput) ToWorkflowTemplateJobHiveJobPtrOutput() WorkflowTemplateJobHiveJobPtrOutput

func (WorkflowTemplateJobHiveJobOutput) ToWorkflowTemplateJobHiveJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHiveJobOutput) ToWorkflowTemplateJobHiveJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobPtrOutput

type WorkflowTemplateJobHiveJobPtrInput added in v5.2.0

type WorkflowTemplateJobHiveJobPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobHiveJobPtrOutput() WorkflowTemplateJobHiveJobPtrOutput
	ToWorkflowTemplateJobHiveJobPtrOutputWithContext(context.Context) WorkflowTemplateJobHiveJobPtrOutput
}

WorkflowTemplateJobHiveJobPtrInput is an input type that accepts WorkflowTemplateJobHiveJobArgs, WorkflowTemplateJobHiveJobPtr and WorkflowTemplateJobHiveJobPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobHiveJobPtrInput` via:

        WorkflowTemplateJobHiveJobArgs{...}

or:

        nil

func WorkflowTemplateJobHiveJobPtr added in v5.2.0

type WorkflowTemplateJobHiveJobPtrOutput added in v5.2.0

type WorkflowTemplateJobHiveJobPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobHiveJobPtrOutput) ContinueOnFailure added in v5.2.0

Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.

func (WorkflowTemplateJobHiveJobPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobHiveJobPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobHiveJobPtrOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobHiveJobPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobHiveJobPtrOutput) QueryFileUri added in v5.2.0

The HCFS URI of the script that contains SQL queries.

func (WorkflowTemplateJobHiveJobPtrOutput) QueryList added in v5.2.0

A list of queries.

func (WorkflowTemplateJobHiveJobPtrOutput) ScriptVariables added in v5.2.0

Optional. Mapping of query variable names to values (equivalent to the Hive command: SET `name="value";`).

func (WorkflowTemplateJobHiveJobPtrOutput) ToWorkflowTemplateJobHiveJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobHiveJobPtrOutput) ToWorkflowTemplateJobHiveJobPtrOutput() WorkflowTemplateJobHiveJobPtrOutput

func (WorkflowTemplateJobHiveJobPtrOutput) ToWorkflowTemplateJobHiveJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHiveJobPtrOutput) ToWorkflowTemplateJobHiveJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobPtrOutput

type WorkflowTemplateJobHiveJobQueryList added in v5.2.0

type WorkflowTemplateJobHiveJobQueryList struct {
	// Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
	Queries []string `pulumi:"queries"`
}

type WorkflowTemplateJobHiveJobQueryListArgs added in v5.2.0

type WorkflowTemplateJobHiveJobQueryListArgs struct {
	// Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
	Queries pulumi.StringArrayInput `pulumi:"queries"`
}

func (WorkflowTemplateJobHiveJobQueryListArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobHiveJobQueryListArgs) ToWorkflowTemplateJobHiveJobQueryListOutput added in v5.2.0

func (i WorkflowTemplateJobHiveJobQueryListArgs) ToWorkflowTemplateJobHiveJobQueryListOutput() WorkflowTemplateJobHiveJobQueryListOutput

func (WorkflowTemplateJobHiveJobQueryListArgs) ToWorkflowTemplateJobHiveJobQueryListOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobHiveJobQueryListArgs) ToWorkflowTemplateJobHiveJobQueryListOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobQueryListOutput

func (WorkflowTemplateJobHiveJobQueryListArgs) ToWorkflowTemplateJobHiveJobQueryListPtrOutput added in v5.2.0

func (i WorkflowTemplateJobHiveJobQueryListArgs) ToWorkflowTemplateJobHiveJobQueryListPtrOutput() WorkflowTemplateJobHiveJobQueryListPtrOutput

func (WorkflowTemplateJobHiveJobQueryListArgs) ToWorkflowTemplateJobHiveJobQueryListPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobHiveJobQueryListArgs) ToWorkflowTemplateJobHiveJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobQueryListPtrOutput

type WorkflowTemplateJobHiveJobQueryListInput added in v5.2.0

type WorkflowTemplateJobHiveJobQueryListInput interface {
	pulumi.Input

	ToWorkflowTemplateJobHiveJobQueryListOutput() WorkflowTemplateJobHiveJobQueryListOutput
	ToWorkflowTemplateJobHiveJobQueryListOutputWithContext(context.Context) WorkflowTemplateJobHiveJobQueryListOutput
}

WorkflowTemplateJobHiveJobQueryListInput is an input type that accepts WorkflowTemplateJobHiveJobQueryListArgs and WorkflowTemplateJobHiveJobQueryListOutput values. You can construct a concrete instance of `WorkflowTemplateJobHiveJobQueryListInput` via:

WorkflowTemplateJobHiveJobQueryListArgs{...}
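
Inline queries (placeholder SQL) can be supplied instead of `QueryFileUri`:

```go
queryList := &dataproc.WorkflowTemplateJobHiveJobQueryListArgs{
	Queries: pulumi.StringArray{
		pulumi.String("SHOW DATABASES"),
		pulumi.String("SELECT COUNT(*) FROM my_table"),
	},
}
_ = queryList // set as QueryList on WorkflowTemplateJobHiveJobArgs
```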

type WorkflowTemplateJobHiveJobQueryListOutput added in v5.2.0

type WorkflowTemplateJobHiveJobQueryListOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobHiveJobQueryListOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobHiveJobQueryListOutput) Queries added in v5.2.0

Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

func (WorkflowTemplateJobHiveJobQueryListOutput) ToWorkflowTemplateJobHiveJobQueryListOutput added in v5.2.0

func (o WorkflowTemplateJobHiveJobQueryListOutput) ToWorkflowTemplateJobHiveJobQueryListOutput() WorkflowTemplateJobHiveJobQueryListOutput

func (WorkflowTemplateJobHiveJobQueryListOutput) ToWorkflowTemplateJobHiveJobQueryListOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHiveJobQueryListOutput) ToWorkflowTemplateJobHiveJobQueryListOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobQueryListOutput

func (WorkflowTemplateJobHiveJobQueryListOutput) ToWorkflowTemplateJobHiveJobQueryListPtrOutput added in v5.2.0

func (o WorkflowTemplateJobHiveJobQueryListOutput) ToWorkflowTemplateJobHiveJobQueryListPtrOutput() WorkflowTemplateJobHiveJobQueryListPtrOutput

func (WorkflowTemplateJobHiveJobQueryListOutput) ToWorkflowTemplateJobHiveJobQueryListPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHiveJobQueryListOutput) ToWorkflowTemplateJobHiveJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobQueryListPtrOutput

type WorkflowTemplateJobHiveJobQueryListPtrInput added in v5.2.0

type WorkflowTemplateJobHiveJobQueryListPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobHiveJobQueryListPtrOutput() WorkflowTemplateJobHiveJobQueryListPtrOutput
	ToWorkflowTemplateJobHiveJobQueryListPtrOutputWithContext(context.Context) WorkflowTemplateJobHiveJobQueryListPtrOutput
}

WorkflowTemplateJobHiveJobQueryListPtrInput is an input type that accepts WorkflowTemplateJobHiveJobQueryListArgs, WorkflowTemplateJobHiveJobQueryListPtr and WorkflowTemplateJobHiveJobQueryListPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobHiveJobQueryListPtrInput` via:

        WorkflowTemplateJobHiveJobQueryListArgs{...}

or:

        nil
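
For illustration only (the query strings below are invented, not taken from this documentation), a concrete `WorkflowTemplateJobHiveJobQueryListPtrInput` can be built from a literal Args value; leaving the field nil simply omits the query list:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	// A literal Args value satisfies WorkflowTemplateJobHiveJobQueryListPtrInput.
	// Each element may hold one query or several queries joined by semicolons;
	// trailing semicolons are not required.
	var queryList dataproc.WorkflowTemplateJobHiveJobQueryListPtrInput = dataproc.WorkflowTemplateJobHiveJobQueryListArgs{
		Queries: pulumi.StringArray{
			pulumi.String("SHOW DATABASES"),
			pulumi.String("CREATE TABLE IF NOT EXISTS t (id INT); SHOW TABLES"),
		},
	}
	_ = queryList // assigning nil instead leaves the query list unset
}
```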

type WorkflowTemplateJobHiveJobQueryListPtrOutput added in v5.2.0

type WorkflowTemplateJobHiveJobQueryListPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobHiveJobQueryListPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobHiveJobQueryListPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobHiveJobQueryListPtrOutput) Queries added in v5.2.0

Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

func (WorkflowTemplateJobHiveJobQueryListPtrOutput) ToWorkflowTemplateJobHiveJobQueryListPtrOutput added in v5.2.0

func (o WorkflowTemplateJobHiveJobQueryListPtrOutput) ToWorkflowTemplateJobHiveJobQueryListPtrOutput() WorkflowTemplateJobHiveJobQueryListPtrOutput

func (WorkflowTemplateJobHiveJobQueryListPtrOutput) ToWorkflowTemplateJobHiveJobQueryListPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobHiveJobQueryListPtrOutput) ToWorkflowTemplateJobHiveJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobHiveJobQueryListPtrOutput

type WorkflowTemplateJobInput added in v5.2.0

type WorkflowTemplateJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobOutput() WorkflowTemplateJobOutput
	ToWorkflowTemplateJobOutputWithContext(context.Context) WorkflowTemplateJobOutput
}

WorkflowTemplateJobInput is an input type that accepts WorkflowTemplateJobArgs and WorkflowTemplateJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobInput` via:

WorkflowTemplateJobArgs{...}

type WorkflowTemplateJobOutput added in v5.2.0

type WorkflowTemplateJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobOutput) ElementType() reflect.Type

func (WorkflowTemplateJobOutput) HadoopJob added in v5.2.0

Optional. Job is a Hadoop job.

func (WorkflowTemplateJobOutput) HiveJob added in v5.2.0

Optional. Job is a Hive job.

func (WorkflowTemplateJobOutput) Labels added in v5.2.0

Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}. No more than 32 labels can be associated with a given cluster.

func (WorkflowTemplateJobOutput) PigJob added in v5.2.0

Optional. Job is a Pig job.

func (WorkflowTemplateJobOutput) PrerequisiteStepIds added in v5.2.0

func (o WorkflowTemplateJobOutput) PrerequisiteStepIds() pulumi.StringArrayOutput

Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of the workflow.

func (WorkflowTemplateJobOutput) PrestoJob added in v5.2.0

Optional. Job is a Presto job.

func (WorkflowTemplateJobOutput) PysparkJob added in v5.2.0

Optional. Job is a PySpark job.

func (WorkflowTemplateJobOutput) Scheduling added in v5.2.0

Optional. Job scheduling configuration.

func (WorkflowTemplateJobOutput) SparkJob added in v5.2.0

Optional. Job is a Spark job.

func (WorkflowTemplateJobOutput) SparkRJob added in v5.2.0

Optional. Job is a SparkR job.

func (WorkflowTemplateJobOutput) SparkSqlJob added in v5.2.0

Optional. Job is a SparkSql job.

func (WorkflowTemplateJobOutput) StepId added in v5.2.0

Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as the job `goog-dataproc-workflow-step-id` label, and in the `prerequisiteStepIds` field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.

func (WorkflowTemplateJobOutput) ToWorkflowTemplateJobOutput added in v5.2.0

func (o WorkflowTemplateJobOutput) ToWorkflowTemplateJobOutput() WorkflowTemplateJobOutput

func (WorkflowTemplateJobOutput) ToWorkflowTemplateJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobOutput) ToWorkflowTemplateJobOutputWithContext(ctx context.Context) WorkflowTemplateJobOutput
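
As a minimal, hedged sketch of how the fields above fit together (the step ids, labels, and query are invented for illustration, and the Hive job is assumed to expose a `QueryList` field mirroring the Pig job documented below), a workflow step might be assembled like this before being placed in a template's list of jobs:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	// A Hive step that runs only after the (hypothetical) "prepare-data" step.
	job := dataproc.WorkflowTemplateJobArgs{
		// 3-50 characters: letters, numbers, underscores and hyphens,
		// not beginning or ending with an underscore or hyphen.
		StepId:              pulumi.String("run-hive-queries"),
		PrerequisiteStepIds: pulumi.StringArray{pulumi.String("prepare-data")},
		HiveJob: &dataproc.WorkflowTemplateJobHiveJobArgs{
			QueryList: &dataproc.WorkflowTemplateJobHiveJobQueryListArgs{
				Queries: pulumi.StringArray{pulumi.String("SHOW TABLES")},
			},
		},
		Labels: pulumi.StringMap{"team": pulumi.String("analytics")},
	}
	_ = job // would normally be appended to a WorkflowTemplate's jobs
}
```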

type WorkflowTemplateJobPigJob added in v5.2.0

type WorkflowTemplateJobPigJob struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *WorkflowTemplateJobPigJobLoggingConfig `pulumi:"loggingConfig"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *WorkflowTemplateJobPigJobQueryList `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}

type WorkflowTemplateJobPigJobArgs added in v5.2.0

type WorkflowTemplateJobPigJobArgs struct {
	// Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig WorkflowTemplateJobPigJobLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList WorkflowTemplateJobPigJobQueryListPtrInput `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}
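
As a minimal, hedged sketch of the fields above (the query text, variable name, and bucket path are illustrative, not taken from this documentation):

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pigJob := dataproc.WorkflowTemplateJobPigJobArgs{
		// Keep running the remaining queries even if one of them fails.
		ContinueOnFailure: pulumi.Bool(true),
		QueryList: &dataproc.WorkflowTemplateJobPigJobQueryListArgs{
			Queries: pulumi.StringArray{
				pulumi.String("raw = LOAD '$INPUT' USING PigStorage(',')"),
				pulumi.String("DUMP raw"),
			},
		},
		// Query variables substituted into the queries (per ScriptVariables above).
		ScriptVariables: pulumi.StringMap{
			"INPUT": pulumi.String("gs://my-bucket/data.csv"),
		},
	}
	_ = pigJob // would normally be set as the PigJob of a WorkflowTemplateJobArgs
}
```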

func (WorkflowTemplateJobPigJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobArgs) ToWorkflowTemplateJobPigJobOutput added in v5.2.0

func (i WorkflowTemplateJobPigJobArgs) ToWorkflowTemplateJobPigJobOutput() WorkflowTemplateJobPigJobOutput

func (WorkflowTemplateJobPigJobArgs) ToWorkflowTemplateJobPigJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPigJobArgs) ToWorkflowTemplateJobPigJobOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobOutput

func (WorkflowTemplateJobPigJobArgs) ToWorkflowTemplateJobPigJobPtrOutput added in v5.2.0

func (i WorkflowTemplateJobPigJobArgs) ToWorkflowTemplateJobPigJobPtrOutput() WorkflowTemplateJobPigJobPtrOutput

func (WorkflowTemplateJobPigJobArgs) ToWorkflowTemplateJobPigJobPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPigJobArgs) ToWorkflowTemplateJobPigJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobPtrOutput

type WorkflowTemplateJobPigJobInput added in v5.2.0

type WorkflowTemplateJobPigJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPigJobOutput() WorkflowTemplateJobPigJobOutput
	ToWorkflowTemplateJobPigJobOutputWithContext(context.Context) WorkflowTemplateJobPigJobOutput
}

WorkflowTemplateJobPigJobInput is an input type that accepts WorkflowTemplateJobPigJobArgs and WorkflowTemplateJobPigJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobPigJobInput` via:

WorkflowTemplateJobPigJobArgs{...}

type WorkflowTemplateJobPigJobLoggingConfig added in v5.2.0

type WorkflowTemplateJobPigJobLoggingConfig struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type WorkflowTemplateJobPigJobLoggingConfigArgs added in v5.2.0

type WorkflowTemplateJobPigJobLoggingConfigArgs struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}
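
A short, hedged illustration of DriverLogLevels: keys are package names (or "root" for the root logger) and values are log levels, as in the examples quoted above.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	logging := dataproc.WorkflowTemplateJobPigJobLoggingConfigArgs{
		DriverLogLevels: pulumi.StringMap{
			"root":       pulumi.String("INFO"),
			"org.apache": pulumi.String("DEBUG"),
		},
	}
	_ = logging // would normally be set as the LoggingConfig of a Pig job
}
```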

func (WorkflowTemplateJobPigJobLoggingConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobLoggingConfigArgs) ToWorkflowTemplateJobPigJobLoggingConfigOutput added in v5.2.0

func (i WorkflowTemplateJobPigJobLoggingConfigArgs) ToWorkflowTemplateJobPigJobLoggingConfigOutput() WorkflowTemplateJobPigJobLoggingConfigOutput

func (WorkflowTemplateJobPigJobLoggingConfigArgs) ToWorkflowTemplateJobPigJobLoggingConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPigJobLoggingConfigArgs) ToWorkflowTemplateJobPigJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobLoggingConfigOutput

func (WorkflowTemplateJobPigJobLoggingConfigArgs) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutput added in v5.2.0

func (i WorkflowTemplateJobPigJobLoggingConfigArgs) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutput() WorkflowTemplateJobPigJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPigJobLoggingConfigArgs) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPigJobLoggingConfigArgs) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobLoggingConfigPtrOutput

type WorkflowTemplateJobPigJobLoggingConfigInput added in v5.2.0

type WorkflowTemplateJobPigJobLoggingConfigInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPigJobLoggingConfigOutput() WorkflowTemplateJobPigJobLoggingConfigOutput
	ToWorkflowTemplateJobPigJobLoggingConfigOutputWithContext(context.Context) WorkflowTemplateJobPigJobLoggingConfigOutput
}

WorkflowTemplateJobPigJobLoggingConfigInput is an input type that accepts WorkflowTemplateJobPigJobLoggingConfigArgs and WorkflowTemplateJobPigJobLoggingConfigOutput values. You can construct a concrete instance of `WorkflowTemplateJobPigJobLoggingConfigInput` via:

WorkflowTemplateJobPigJobLoggingConfigArgs{...}

type WorkflowTemplateJobPigJobLoggingConfigOutput added in v5.2.0

type WorkflowTemplateJobPigJobLoggingConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPigJobLoggingConfigOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobPigJobLoggingConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobLoggingConfigOutput) ToWorkflowTemplateJobPigJobLoggingConfigOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobLoggingConfigOutput) ToWorkflowTemplateJobPigJobLoggingConfigOutput() WorkflowTemplateJobPigJobLoggingConfigOutput

func (WorkflowTemplateJobPigJobLoggingConfigOutput) ToWorkflowTemplateJobPigJobLoggingConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobLoggingConfigOutput) ToWorkflowTemplateJobPigJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobLoggingConfigOutput

func (WorkflowTemplateJobPigJobLoggingConfigOutput) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobLoggingConfigOutput) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutput() WorkflowTemplateJobPigJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPigJobLoggingConfigOutput) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobLoggingConfigOutput) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobLoggingConfigPtrOutput

type WorkflowTemplateJobPigJobLoggingConfigPtrInput added in v5.2.0

type WorkflowTemplateJobPigJobLoggingConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPigJobLoggingConfigPtrOutput() WorkflowTemplateJobPigJobLoggingConfigPtrOutput
	ToWorkflowTemplateJobPigJobLoggingConfigPtrOutputWithContext(context.Context) WorkflowTemplateJobPigJobLoggingConfigPtrOutput
}

WorkflowTemplateJobPigJobLoggingConfigPtrInput is an input type that accepts WorkflowTemplateJobPigJobLoggingConfigArgs, WorkflowTemplateJobPigJobLoggingConfigPtr and WorkflowTemplateJobPigJobLoggingConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobPigJobLoggingConfigPtrInput` via:

        WorkflowTemplateJobPigJobLoggingConfigArgs{...}

or:

        nil

type WorkflowTemplateJobPigJobLoggingConfigPtrOutput added in v5.2.0

type WorkflowTemplateJobPigJobLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPigJobLoggingConfigPtrOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobPigJobLoggingConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobPigJobLoggingConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutput() WorkflowTemplateJobPigJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPigJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPigJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobLoggingConfigPtrOutput

type WorkflowTemplateJobPigJobOutput added in v5.2.0

type WorkflowTemplateJobPigJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPigJobOutput) ContinueOnFailure added in v5.2.0

Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.

func (WorkflowTemplateJobPigJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobPigJobOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobPigJobOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.

func (WorkflowTemplateJobPigJobOutput) QueryFileUri added in v5.2.0

The HCFS URI of the script that contains SQL queries.

func (WorkflowTemplateJobPigJobOutput) QueryList added in v5.2.0

A list of queries.

func (WorkflowTemplateJobPigJobOutput) ScriptVariables added in v5.2.0

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).

func (WorkflowTemplateJobPigJobOutput) ToWorkflowTemplateJobPigJobOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobOutput) ToWorkflowTemplateJobPigJobOutput() WorkflowTemplateJobPigJobOutput

func (WorkflowTemplateJobPigJobOutput) ToWorkflowTemplateJobPigJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobOutput) ToWorkflowTemplateJobPigJobOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobOutput

func (WorkflowTemplateJobPigJobOutput) ToWorkflowTemplateJobPigJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobOutput) ToWorkflowTemplateJobPigJobPtrOutput() WorkflowTemplateJobPigJobPtrOutput

func (WorkflowTemplateJobPigJobOutput) ToWorkflowTemplateJobPigJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobOutput) ToWorkflowTemplateJobPigJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobPtrOutput

type WorkflowTemplateJobPigJobPtrInput added in v5.2.0

type WorkflowTemplateJobPigJobPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPigJobPtrOutput() WorkflowTemplateJobPigJobPtrOutput
	ToWorkflowTemplateJobPigJobPtrOutputWithContext(context.Context) WorkflowTemplateJobPigJobPtrOutput
}

WorkflowTemplateJobPigJobPtrInput is an input type that accepts WorkflowTemplateJobPigJobArgs, WorkflowTemplateJobPigJobPtr and WorkflowTemplateJobPigJobPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobPigJobPtrInput` via:

        WorkflowTemplateJobPigJobArgs{...}

or:

        nil

func WorkflowTemplateJobPigJobPtr added in v5.2.0

type WorkflowTemplateJobPigJobPtrOutput added in v5.2.0

type WorkflowTemplateJobPigJobPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPigJobPtrOutput) ContinueOnFailure added in v5.2.0

Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.

func (WorkflowTemplateJobPigJobPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobPigJobPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobPtrOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobPigJobPtrOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobPigJobPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.

func (WorkflowTemplateJobPigJobPtrOutput) QueryFileUri added in v5.2.0

The HCFS URI of the script that contains SQL queries.

func (WorkflowTemplateJobPigJobPtrOutput) QueryList added in v5.2.0

A list of queries.

func (WorkflowTemplateJobPigJobPtrOutput) ScriptVariables added in v5.2.0

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).

func (WorkflowTemplateJobPigJobPtrOutput) ToWorkflowTemplateJobPigJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobPtrOutput) ToWorkflowTemplateJobPigJobPtrOutput() WorkflowTemplateJobPigJobPtrOutput

func (WorkflowTemplateJobPigJobPtrOutput) ToWorkflowTemplateJobPigJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobPtrOutput) ToWorkflowTemplateJobPigJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobPtrOutput

type WorkflowTemplateJobPigJobQueryList added in v5.2.0

type WorkflowTemplateJobPigJobQueryList struct {
	// Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
	Queries []string `pulumi:"queries"`
}

type WorkflowTemplateJobPigJobQueryListArgs added in v5.2.0

type WorkflowTemplateJobPigJobQueryListArgs struct {
	// Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
	Queries pulumi.StringArrayInput `pulumi:"queries"`
}

func (WorkflowTemplateJobPigJobQueryListArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobQueryListArgs) ToWorkflowTemplateJobPigJobQueryListOutput added in v5.2.0

func (i WorkflowTemplateJobPigJobQueryListArgs) ToWorkflowTemplateJobPigJobQueryListOutput() WorkflowTemplateJobPigJobQueryListOutput

func (WorkflowTemplateJobPigJobQueryListArgs) ToWorkflowTemplateJobPigJobQueryListOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPigJobQueryListArgs) ToWorkflowTemplateJobPigJobQueryListOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobQueryListOutput

func (WorkflowTemplateJobPigJobQueryListArgs) ToWorkflowTemplateJobPigJobQueryListPtrOutput added in v5.2.0

func (i WorkflowTemplateJobPigJobQueryListArgs) ToWorkflowTemplateJobPigJobQueryListPtrOutput() WorkflowTemplateJobPigJobQueryListPtrOutput

func (WorkflowTemplateJobPigJobQueryListArgs) ToWorkflowTemplateJobPigJobQueryListPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPigJobQueryListArgs) ToWorkflowTemplateJobPigJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobQueryListPtrOutput

type WorkflowTemplateJobPigJobQueryListInput added in v5.2.0

type WorkflowTemplateJobPigJobQueryListInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPigJobQueryListOutput() WorkflowTemplateJobPigJobQueryListOutput
	ToWorkflowTemplateJobPigJobQueryListOutputWithContext(context.Context) WorkflowTemplateJobPigJobQueryListOutput
}

WorkflowTemplateJobPigJobQueryListInput is an input type that accepts WorkflowTemplateJobPigJobQueryListArgs and WorkflowTemplateJobPigJobQueryListOutput values. You can construct a concrete instance of `WorkflowTemplateJobPigJobQueryListInput` via:

WorkflowTemplateJobPigJobQueryListArgs{...}

type WorkflowTemplateJobPigJobQueryListOutput added in v5.2.0

type WorkflowTemplateJobPigJobQueryListOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPigJobQueryListOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobQueryListOutput) Queries added in v5.2.0

Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

func (WorkflowTemplateJobPigJobQueryListOutput) ToWorkflowTemplateJobPigJobQueryListOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobQueryListOutput) ToWorkflowTemplateJobPigJobQueryListOutput() WorkflowTemplateJobPigJobQueryListOutput

func (WorkflowTemplateJobPigJobQueryListOutput) ToWorkflowTemplateJobPigJobQueryListOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobQueryListOutput) ToWorkflowTemplateJobPigJobQueryListOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobQueryListOutput

func (WorkflowTemplateJobPigJobQueryListOutput) ToWorkflowTemplateJobPigJobQueryListPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobQueryListOutput) ToWorkflowTemplateJobPigJobQueryListPtrOutput() WorkflowTemplateJobPigJobQueryListPtrOutput

func (WorkflowTemplateJobPigJobQueryListOutput) ToWorkflowTemplateJobPigJobQueryListPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobQueryListOutput) ToWorkflowTemplateJobPigJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobQueryListPtrOutput

type WorkflowTemplateJobPigJobQueryListPtrInput added in v5.2.0

type WorkflowTemplateJobPigJobQueryListPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPigJobQueryListPtrOutput() WorkflowTemplateJobPigJobQueryListPtrOutput
	ToWorkflowTemplateJobPigJobQueryListPtrOutputWithContext(context.Context) WorkflowTemplateJobPigJobQueryListPtrOutput
}

WorkflowTemplateJobPigJobQueryListPtrInput is an input type that accepts WorkflowTemplateJobPigJobQueryListArgs, WorkflowTemplateJobPigJobQueryListPtr and WorkflowTemplateJobPigJobQueryListPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobPigJobQueryListPtrInput` via:

        WorkflowTemplateJobPigJobQueryListArgs{...}

or:

        nil

type WorkflowTemplateJobPigJobQueryListPtrOutput added in v5.2.0

type WorkflowTemplateJobPigJobQueryListPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPigJobQueryListPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobPigJobQueryListPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPigJobQueryListPtrOutput) Queries added in v5.2.0

Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

func (WorkflowTemplateJobPigJobQueryListPtrOutput) ToWorkflowTemplateJobPigJobQueryListPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPigJobQueryListPtrOutput) ToWorkflowTemplateJobPigJobQueryListPtrOutput() WorkflowTemplateJobPigJobQueryListPtrOutput

func (WorkflowTemplateJobPigJobQueryListPtrOutput) ToWorkflowTemplateJobPigJobQueryListPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPigJobQueryListPtrOutput) ToWorkflowTemplateJobPigJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPigJobQueryListPtrOutput

type WorkflowTemplateJobPrestoJob added in v5.2.0

type WorkflowTemplateJobPrestoJob struct {
	// Optional. Presto client tags to attach to this query
	ClientTags []string `pulumi:"clientTags"`
	// Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.
	ContinueOnFailure *bool `pulumi:"continueOnFailure"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *WorkflowTemplateJobPrestoJobLoggingConfig `pulumi:"loggingConfig"`
	// Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats
	OutputFormat *string `pulumi:"outputFormat"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *WorkflowTemplateJobPrestoJobQueryList `pulumi:"queryList"`
}

type WorkflowTemplateJobPrestoJobArgs added in v5.2.0

type WorkflowTemplateJobPrestoJobArgs struct {
	// Optional. Presto client tags to attach to this query
	ClientTags pulumi.StringArrayInput `pulumi:"clientTags"`
	// Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.
	ContinueOnFailure pulumi.BoolPtrInput `pulumi:"continueOnFailure"`
	// Optional. The runtime log config for job execution.
	LoggingConfig WorkflowTemplateJobPrestoJobLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats
	OutputFormat pulumi.StringPtrInput `pulumi:"outputFormat"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList WorkflowTemplateJobPrestoJobQueryListPtrInput `pulumi:"queryList"`
}
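
A hedged sketch of the fields above (the client tags, output format, and query are illustrative values, not taken from this documentation):

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	prestoJob := dataproc.WorkflowTemplateJobPrestoJobArgs{
		// Tags attached to the query on the Presto side.
		ClientTags:   pulumi.StringArray{pulumi.String("etl"), pulumi.String("nightly")},
		OutputFormat: pulumi.String("CSV"),
		QueryList: &dataproc.WorkflowTemplateJobPrestoJobQueryListArgs{
			Queries: pulumi.StringArray{
				pulumi.String("SELECT count(*) FROM example_table"),
			},
		},
	}
	_ = prestoJob // would normally be set as the PrestoJob of a WorkflowTemplateJobArgs
}
```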

func (WorkflowTemplateJobPrestoJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobArgs) ToWorkflowTemplateJobPrestoJobOutput added in v5.2.0

func (i WorkflowTemplateJobPrestoJobArgs) ToWorkflowTemplateJobPrestoJobOutput() WorkflowTemplateJobPrestoJobOutput

func (WorkflowTemplateJobPrestoJobArgs) ToWorkflowTemplateJobPrestoJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPrestoJobArgs) ToWorkflowTemplateJobPrestoJobOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobOutput

func (WorkflowTemplateJobPrestoJobArgs) ToWorkflowTemplateJobPrestoJobPtrOutput added in v5.2.0

func (i WorkflowTemplateJobPrestoJobArgs) ToWorkflowTemplateJobPrestoJobPtrOutput() WorkflowTemplateJobPrestoJobPtrOutput

func (WorkflowTemplateJobPrestoJobArgs) ToWorkflowTemplateJobPrestoJobPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPrestoJobArgs) ToWorkflowTemplateJobPrestoJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobPtrOutput

type WorkflowTemplateJobPrestoJobInput added in v5.2.0

type WorkflowTemplateJobPrestoJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPrestoJobOutput() WorkflowTemplateJobPrestoJobOutput
	ToWorkflowTemplateJobPrestoJobOutputWithContext(context.Context) WorkflowTemplateJobPrestoJobOutput
}

WorkflowTemplateJobPrestoJobInput is an input type that accepts WorkflowTemplateJobPrestoJobArgs and WorkflowTemplateJobPrestoJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobPrestoJobInput` via:

WorkflowTemplateJobPrestoJobArgs{...}

type WorkflowTemplateJobPrestoJobLoggingConfig added in v5.2.0

type WorkflowTemplateJobPrestoJobLoggingConfig struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type WorkflowTemplateJobPrestoJobLoggingConfigArgs added in v5.2.0

type WorkflowTemplateJobPrestoJobLoggingConfigArgs struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (WorkflowTemplateJobPrestoJobLoggingConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobLoggingConfigArgs) ToWorkflowTemplateJobPrestoJobLoggingConfigOutput added in v5.2.0

func (i WorkflowTemplateJobPrestoJobLoggingConfigArgs) ToWorkflowTemplateJobPrestoJobLoggingConfigOutput() WorkflowTemplateJobPrestoJobLoggingConfigOutput

func (WorkflowTemplateJobPrestoJobLoggingConfigArgs) ToWorkflowTemplateJobPrestoJobLoggingConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPrestoJobLoggingConfigArgs) ToWorkflowTemplateJobPrestoJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobLoggingConfigOutput

func (WorkflowTemplateJobPrestoJobLoggingConfigArgs) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutput added in v5.2.0

func (i WorkflowTemplateJobPrestoJobLoggingConfigArgs) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutput() WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPrestoJobLoggingConfigArgs) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPrestoJobLoggingConfigArgs) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput

type WorkflowTemplateJobPrestoJobLoggingConfigInput added in v5.2.0

type WorkflowTemplateJobPrestoJobLoggingConfigInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPrestoJobLoggingConfigOutput() WorkflowTemplateJobPrestoJobLoggingConfigOutput
	ToWorkflowTemplateJobPrestoJobLoggingConfigOutputWithContext(context.Context) WorkflowTemplateJobPrestoJobLoggingConfigOutput
}

WorkflowTemplateJobPrestoJobLoggingConfigInput is an input type that accepts WorkflowTemplateJobPrestoJobLoggingConfigArgs and WorkflowTemplateJobPrestoJobLoggingConfigOutput values. You can construct a concrete instance of `WorkflowTemplateJobPrestoJobLoggingConfigInput` via:

WorkflowTemplateJobPrestoJobLoggingConfigArgs{...}

type WorkflowTemplateJobPrestoJobLoggingConfigOutput added in v5.2.0

type WorkflowTemplateJobPrestoJobLoggingConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPrestoJobLoggingConfigOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobPrestoJobLoggingConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobLoggingConfigOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobLoggingConfigOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigOutput() WorkflowTemplateJobPrestoJobLoggingConfigOutput

func (WorkflowTemplateJobPrestoJobLoggingConfigOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobLoggingConfigOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobLoggingConfigOutput

func (WorkflowTemplateJobPrestoJobLoggingConfigOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobLoggingConfigOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutput() WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPrestoJobLoggingConfigOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobLoggingConfigOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput

type WorkflowTemplateJobPrestoJobLoggingConfigPtrInput added in v5.2.0

type WorkflowTemplateJobPrestoJobLoggingConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutput() WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput
	ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutputWithContext(context.Context) WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput
}

WorkflowTemplateJobPrestoJobLoggingConfigPtrInput is an input type that accepts WorkflowTemplateJobPrestoJobLoggingConfigArgs, WorkflowTemplateJobPrestoJobLoggingConfigPtr and WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobPrestoJobLoggingConfigPtrInput` via:

        WorkflowTemplateJobPrestoJobLoggingConfigArgs{...}

or:

        nil

type WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput added in v5.2.0

type WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutput() WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPrestoJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobLoggingConfigPtrOutput

type WorkflowTemplateJobPrestoJobOutput added in v5.2.0

type WorkflowTemplateJobPrestoJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPrestoJobOutput) ClientTags added in v5.2.0

Optional. Presto client tags to attach to this query

func (WorkflowTemplateJobPrestoJobOutput) ContinueOnFailure added in v5.2.0

Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.

func (WorkflowTemplateJobPrestoJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobPrestoJobOutput) OutputFormat added in v5.2.0

Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats

func (WorkflowTemplateJobPrestoJobOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.

func (WorkflowTemplateJobPrestoJobOutput) QueryFileUri added in v5.2.0

The HCFS URI of the script that contains SQL queries.

func (WorkflowTemplateJobPrestoJobOutput) QueryList added in v5.2.0

A list of queries.

func (WorkflowTemplateJobPrestoJobOutput) ToWorkflowTemplateJobPrestoJobOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobOutput) ToWorkflowTemplateJobPrestoJobOutput() WorkflowTemplateJobPrestoJobOutput

func (WorkflowTemplateJobPrestoJobOutput) ToWorkflowTemplateJobPrestoJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobOutput) ToWorkflowTemplateJobPrestoJobOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobOutput

func (WorkflowTemplateJobPrestoJobOutput) ToWorkflowTemplateJobPrestoJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobOutput) ToWorkflowTemplateJobPrestoJobPtrOutput() WorkflowTemplateJobPrestoJobPtrOutput

func (WorkflowTemplateJobPrestoJobOutput) ToWorkflowTemplateJobPrestoJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobOutput) ToWorkflowTemplateJobPrestoJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobPtrOutput

type WorkflowTemplateJobPrestoJobPtrInput added in v5.2.0

type WorkflowTemplateJobPrestoJobPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPrestoJobPtrOutput() WorkflowTemplateJobPrestoJobPtrOutput
	ToWorkflowTemplateJobPrestoJobPtrOutputWithContext(context.Context) WorkflowTemplateJobPrestoJobPtrOutput
}

WorkflowTemplateJobPrestoJobPtrInput is an input type that accepts WorkflowTemplateJobPrestoJobArgs, WorkflowTemplateJobPrestoJobPtr and WorkflowTemplateJobPrestoJobPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobPrestoJobPtrInput` via:

        WorkflowTemplateJobPrestoJobArgs{...}

or:

        nil

type WorkflowTemplateJobPrestoJobPtrOutput added in v5.2.0

type WorkflowTemplateJobPrestoJobPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPrestoJobPtrOutput) ClientTags added in v5.2.0

Optional. Presto client tags to attach to this query

func (WorkflowTemplateJobPrestoJobPtrOutput) ContinueOnFailure added in v5.2.0

Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.

func (WorkflowTemplateJobPrestoJobPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobPrestoJobPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobPtrOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobPrestoJobPtrOutput) OutputFormat added in v5.2.0

Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats

func (WorkflowTemplateJobPrestoJobPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.

func (WorkflowTemplateJobPrestoJobPtrOutput) QueryFileUri added in v5.2.0

The HCFS URI of the script that contains SQL queries.

func (WorkflowTemplateJobPrestoJobPtrOutput) QueryList added in v5.2.0

A list of queries.

func (WorkflowTemplateJobPrestoJobPtrOutput) ToWorkflowTemplateJobPrestoJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobPtrOutput) ToWorkflowTemplateJobPrestoJobPtrOutput() WorkflowTemplateJobPrestoJobPtrOutput

func (WorkflowTemplateJobPrestoJobPtrOutput) ToWorkflowTemplateJobPrestoJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobPtrOutput) ToWorkflowTemplateJobPrestoJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobPtrOutput

type WorkflowTemplateJobPrestoJobQueryList added in v5.2.0

type WorkflowTemplateJobPrestoJobQueryList struct {
	// Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
	Queries []string `pulumi:"queries"`
}

type WorkflowTemplateJobPrestoJobQueryListArgs added in v5.2.0

type WorkflowTemplateJobPrestoJobQueryListArgs struct {
	// Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
	Queries pulumi.StringArrayInput `pulumi:"queries"`
}

func (WorkflowTemplateJobPrestoJobQueryListArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobQueryListArgs) ToWorkflowTemplateJobPrestoJobQueryListOutput added in v5.2.0

func (i WorkflowTemplateJobPrestoJobQueryListArgs) ToWorkflowTemplateJobPrestoJobQueryListOutput() WorkflowTemplateJobPrestoJobQueryListOutput

func (WorkflowTemplateJobPrestoJobQueryListArgs) ToWorkflowTemplateJobPrestoJobQueryListOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPrestoJobQueryListArgs) ToWorkflowTemplateJobPrestoJobQueryListOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobQueryListOutput

func (WorkflowTemplateJobPrestoJobQueryListArgs) ToWorkflowTemplateJobPrestoJobQueryListPtrOutput added in v5.2.0

func (i WorkflowTemplateJobPrestoJobQueryListArgs) ToWorkflowTemplateJobPrestoJobQueryListPtrOutput() WorkflowTemplateJobPrestoJobQueryListPtrOutput

func (WorkflowTemplateJobPrestoJobQueryListArgs) ToWorkflowTemplateJobPrestoJobQueryListPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPrestoJobQueryListArgs) ToWorkflowTemplateJobPrestoJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobQueryListPtrOutput

type WorkflowTemplateJobPrestoJobQueryListInput added in v5.2.0

type WorkflowTemplateJobPrestoJobQueryListInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPrestoJobQueryListOutput() WorkflowTemplateJobPrestoJobQueryListOutput
	ToWorkflowTemplateJobPrestoJobQueryListOutputWithContext(context.Context) WorkflowTemplateJobPrestoJobQueryListOutput
}

WorkflowTemplateJobPrestoJobQueryListInput is an input type that accepts WorkflowTemplateJobPrestoJobQueryListArgs and WorkflowTemplateJobPrestoJobQueryListOutput values. You can construct a concrete instance of `WorkflowTemplateJobPrestoJobQueryListInput` via:

WorkflowTemplateJobPrestoJobQueryListArgs{...}

type WorkflowTemplateJobPrestoJobQueryListOutput added in v5.2.0

type WorkflowTemplateJobPrestoJobQueryListOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPrestoJobQueryListOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobQueryListOutput) Queries added in v5.2.0

Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

func (WorkflowTemplateJobPrestoJobQueryListOutput) ToWorkflowTemplateJobPrestoJobQueryListOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobQueryListOutput) ToWorkflowTemplateJobPrestoJobQueryListOutput() WorkflowTemplateJobPrestoJobQueryListOutput

func (WorkflowTemplateJobPrestoJobQueryListOutput) ToWorkflowTemplateJobPrestoJobQueryListOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobQueryListOutput) ToWorkflowTemplateJobPrestoJobQueryListOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobQueryListOutput

func (WorkflowTemplateJobPrestoJobQueryListOutput) ToWorkflowTemplateJobPrestoJobQueryListPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobQueryListOutput) ToWorkflowTemplateJobPrestoJobQueryListPtrOutput() WorkflowTemplateJobPrestoJobQueryListPtrOutput

func (WorkflowTemplateJobPrestoJobQueryListOutput) ToWorkflowTemplateJobPrestoJobQueryListPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobQueryListOutput) ToWorkflowTemplateJobPrestoJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobQueryListPtrOutput

type WorkflowTemplateJobPrestoJobQueryListPtrInput added in v5.2.0

type WorkflowTemplateJobPrestoJobQueryListPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPrestoJobQueryListPtrOutput() WorkflowTemplateJobPrestoJobQueryListPtrOutput
	ToWorkflowTemplateJobPrestoJobQueryListPtrOutputWithContext(context.Context) WorkflowTemplateJobPrestoJobQueryListPtrOutput
}

WorkflowTemplateJobPrestoJobQueryListPtrInput is an input type that accepts WorkflowTemplateJobPrestoJobQueryListArgs, WorkflowTemplateJobPrestoJobQueryListPtr and WorkflowTemplateJobPrestoJobQueryListPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobPrestoJobQueryListPtrInput` via:

        WorkflowTemplateJobPrestoJobQueryListArgs{...}

or:

        nil

type WorkflowTemplateJobPrestoJobQueryListPtrOutput added in v5.2.0

type WorkflowTemplateJobPrestoJobQueryListPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPrestoJobQueryListPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobPrestoJobQueryListPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPrestoJobQueryListPtrOutput) Queries added in v5.2.0

Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

func (WorkflowTemplateJobPrestoJobQueryListPtrOutput) ToWorkflowTemplateJobPrestoJobQueryListPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPrestoJobQueryListPtrOutput) ToWorkflowTemplateJobPrestoJobQueryListPtrOutput() WorkflowTemplateJobPrestoJobQueryListPtrOutput

func (WorkflowTemplateJobPrestoJobQueryListPtrOutput) ToWorkflowTemplateJobPrestoJobQueryListPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPrestoJobQueryListPtrOutput) ToWorkflowTemplateJobPrestoJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPrestoJobQueryListPtrOutput

type WorkflowTemplateJobPysparkJob added in v5.2.0

type WorkflowTemplateJobPysparkJob struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *WorkflowTemplateJobPysparkJobLoggingConfig `pulumi:"loggingConfig"`
	// Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri string `pulumi:"mainPythonFileUri"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties map[string]string `pulumi:"properties"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `pulumi:"pythonFileUris"`
}

type WorkflowTemplateJobPysparkJobArgs added in v5.2.0

type WorkflowTemplateJobPysparkJobArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig WorkflowTemplateJobPysparkJobLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
	MainPythonFileUri pulumi.StringInput `pulumi:"mainPythonFileUri"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris pulumi.StringArrayInput `pulumi:"pythonFileUris"`
}
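
A hedged sketch of the fields above (the bucket paths, argument values, and Spark property are invented for illustration):

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pysparkJob := dataproc.WorkflowTemplateJobPysparkJobArgs{
		// Required: the driver script; must be a .py file.
		MainPythonFileUri: pulumi.String("gs://my-bucket/jobs/main.py"),
		// Extra Python files passed to the PySpark framework.
		PythonFileUris: pulumi.StringArray{pulumi.String("gs://my-bucket/jobs/helpers.py")},
		// Driver arguments; avoid flags such as --conf that belong in Properties.
		Args: pulumi.StringArray{pulumi.String("--date"), pulumi.String("2021-11-01")},
		// Daemon config properties in prefix:property format.
		Properties: pulumi.StringMap{
			"spark:spark.executor.memory": pulumi.String("4g"),
		},
	}
	_ = pysparkJob // would normally be set as the PysparkJob of a WorkflowTemplateJobArgs
}
```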

func (WorkflowTemplateJobPysparkJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobPysparkJobArgs) ToWorkflowTemplateJobPysparkJobOutput added in v5.2.0

func (i WorkflowTemplateJobPysparkJobArgs) ToWorkflowTemplateJobPysparkJobOutput() WorkflowTemplateJobPysparkJobOutput

func (WorkflowTemplateJobPysparkJobArgs) ToWorkflowTemplateJobPysparkJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPysparkJobArgs) ToWorkflowTemplateJobPysparkJobOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobOutput

func (WorkflowTemplateJobPysparkJobArgs) ToWorkflowTemplateJobPysparkJobPtrOutput added in v5.2.0

func (i WorkflowTemplateJobPysparkJobArgs) ToWorkflowTemplateJobPysparkJobPtrOutput() WorkflowTemplateJobPysparkJobPtrOutput

func (WorkflowTemplateJobPysparkJobArgs) ToWorkflowTemplateJobPysparkJobPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPysparkJobArgs) ToWorkflowTemplateJobPysparkJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobPtrOutput

type WorkflowTemplateJobPysparkJobInput added in v5.2.0

type WorkflowTemplateJobPysparkJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPysparkJobOutput() WorkflowTemplateJobPysparkJobOutput
	ToWorkflowTemplateJobPysparkJobOutputWithContext(context.Context) WorkflowTemplateJobPysparkJobOutput
}

WorkflowTemplateJobPysparkJobInput is an input type that accepts WorkflowTemplateJobPysparkJobArgs and WorkflowTemplateJobPysparkJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobPysparkJobInput` via:

WorkflowTemplateJobPysparkJobArgs{...}
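
As a quick illustration, the following is a minimal sketch of a PySpark job definition. It assumes the usual `pulumi` and `dataproc` imports from the example above and a surrounding `pulumi.Run` callback; the bucket paths, arguments, and property values are placeholders. A value like this is typically assigned to the `PysparkJob` field of a workflow template job step.

```go
// Sketch only: URIs, arguments, and property values are placeholders.
pysparkJob := &dataproc.WorkflowTemplateJobPysparkJobArgs{
	MainPythonFileUri: pulumi.String("gs://my-bucket/jobs/wordcount.py"),
	Args: pulumi.StringArray{
		pulumi.String("--input"),
		pulumi.String("gs://my-bucket/data/"),
	},
	PythonFileUris: pulumi.StringArray{
		pulumi.String("gs://my-bucket/jobs/helpers.zip"),
	},
	Properties: pulumi.StringMap{
		"spark:spark.executor.memory": pulumi.String("4g"),
	},
}
```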

type WorkflowTemplateJobPysparkJobLoggingConfig added in v5.2.0

type WorkflowTemplateJobPysparkJobLoggingConfig struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type WorkflowTemplateJobPysparkJobLoggingConfigArgs added in v5.2.0

type WorkflowTemplateJobPysparkJobLoggingConfigArgs struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (WorkflowTemplateJobPysparkJobLoggingConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobPysparkJobLoggingConfigArgs) ToWorkflowTemplateJobPysparkJobLoggingConfigOutput added in v5.2.0

func (i WorkflowTemplateJobPysparkJobLoggingConfigArgs) ToWorkflowTemplateJobPysparkJobLoggingConfigOutput() WorkflowTemplateJobPysparkJobLoggingConfigOutput

func (WorkflowTemplateJobPysparkJobLoggingConfigArgs) ToWorkflowTemplateJobPysparkJobLoggingConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPysparkJobLoggingConfigArgs) ToWorkflowTemplateJobPysparkJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobLoggingConfigOutput

func (WorkflowTemplateJobPysparkJobLoggingConfigArgs) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutput added in v5.2.0

func (i WorkflowTemplateJobPysparkJobLoggingConfigArgs) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutput() WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPysparkJobLoggingConfigArgs) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobPysparkJobLoggingConfigArgs) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput

type WorkflowTemplateJobPysparkJobLoggingConfigInput added in v5.2.0

type WorkflowTemplateJobPysparkJobLoggingConfigInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPysparkJobLoggingConfigOutput() WorkflowTemplateJobPysparkJobLoggingConfigOutput
	ToWorkflowTemplateJobPysparkJobLoggingConfigOutputWithContext(context.Context) WorkflowTemplateJobPysparkJobLoggingConfigOutput
}

WorkflowTemplateJobPysparkJobLoggingConfigInput is an input type that accepts WorkflowTemplateJobPysparkJobLoggingConfigArgs and WorkflowTemplateJobPysparkJobLoggingConfigOutput values. You can construct a concrete instance of `WorkflowTemplateJobPysparkJobLoggingConfigInput` via:

WorkflowTemplateJobPysparkJobLoggingConfigArgs{...}
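
For reference, a hedged sketch of a logging config; the package names and levels shown are arbitrary examples.

```go
// Sketch only: package names and levels are arbitrary examples.
loggingConfig := &dataproc.WorkflowTemplateJobPysparkJobLoggingConfigArgs{
	DriverLogLevels: pulumi.StringMap{
		"root":       pulumi.String("INFO"),
		"org.apache": pulumi.String("DEBUG"),
	},
}
```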

type WorkflowTemplateJobPysparkJobLoggingConfigOutput added in v5.2.0

type WorkflowTemplateJobPysparkJobLoggingConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPysparkJobLoggingConfigOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobPysparkJobLoggingConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPysparkJobLoggingConfigOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigOutput added in v5.2.0

func (o WorkflowTemplateJobPysparkJobLoggingConfigOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigOutput() WorkflowTemplateJobPysparkJobLoggingConfigOutput

func (WorkflowTemplateJobPysparkJobLoggingConfigOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPysparkJobLoggingConfigOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobLoggingConfigOutput

func (WorkflowTemplateJobPysparkJobLoggingConfigOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPysparkJobLoggingConfigOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutput() WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPysparkJobLoggingConfigOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPysparkJobLoggingConfigOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput

type WorkflowTemplateJobPysparkJobLoggingConfigPtrInput added in v5.2.0

type WorkflowTemplateJobPysparkJobLoggingConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutput() WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput
	ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutputWithContext(context.Context) WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput
}

WorkflowTemplateJobPysparkJobLoggingConfigPtrInput is an input type that accepts WorkflowTemplateJobPysparkJobLoggingConfigArgs, WorkflowTemplateJobPysparkJobLoggingConfigPtr and WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobPysparkJobLoggingConfigPtrInput` via:

        WorkflowTemplateJobPysparkJobLoggingConfigArgs{...}

or:

        nil
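
In practice this means an optional `LoggingConfig` field can either be given a `...LoggingConfigArgs` value or simply left unset; a minimal sketch with placeholder values:

```go
// With an explicit logging config (placeholder values)...
withLogging := &dataproc.WorkflowTemplateJobPysparkJobArgs{
	MainPythonFileUri: pulumi.String("gs://my-bucket/jobs/job.py"),
	LoggingConfig: &dataproc.WorkflowTemplateJobPysparkJobLoggingConfigArgs{
		DriverLogLevels: pulumi.StringMap{"root": pulumi.String("WARN")},
	},
}

// ...or with the optional block left nil to accept the defaults.
withoutLogging := &dataproc.WorkflowTemplateJobPysparkJobArgs{
	MainPythonFileUri: pulumi.String("gs://my-bucket/jobs/job.py"),
}
```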

type WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput added in v5.2.0

type WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutput() WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput

func (WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput) ToWorkflowTemplateJobPysparkJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobLoggingConfigPtrOutput

type WorkflowTemplateJobPysparkJobOutput added in v5.2.0

type WorkflowTemplateJobPysparkJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPysparkJobOutput) ArchiveUris added in v5.2.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (WorkflowTemplateJobPysparkJobOutput) Args added in v5.2.0

Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (WorkflowTemplateJobPysparkJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPysparkJobOutput) FileUris added in v5.2.0

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (WorkflowTemplateJobPysparkJobOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobPysparkJobOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobPysparkJobOutput) MainPythonFileUri added in v5.2.0

Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (WorkflowTemplateJobPysparkJobOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobPysparkJobOutput) PythonFileUris added in v5.2.0

Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (WorkflowTemplateJobPysparkJobOutput) ToWorkflowTemplateJobPysparkJobOutput added in v5.2.0

func (o WorkflowTemplateJobPysparkJobOutput) ToWorkflowTemplateJobPysparkJobOutput() WorkflowTemplateJobPysparkJobOutput

func (WorkflowTemplateJobPysparkJobOutput) ToWorkflowTemplateJobPysparkJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPysparkJobOutput) ToWorkflowTemplateJobPysparkJobOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobOutput

func (WorkflowTemplateJobPysparkJobOutput) ToWorkflowTemplateJobPysparkJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPysparkJobOutput) ToWorkflowTemplateJobPysparkJobPtrOutput() WorkflowTemplateJobPysparkJobPtrOutput

func (WorkflowTemplateJobPysparkJobOutput) ToWorkflowTemplateJobPysparkJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPysparkJobOutput) ToWorkflowTemplateJobPysparkJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobPtrOutput

type WorkflowTemplateJobPysparkJobPtrInput added in v5.2.0

type WorkflowTemplateJobPysparkJobPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobPysparkJobPtrOutput() WorkflowTemplateJobPysparkJobPtrOutput
	ToWorkflowTemplateJobPysparkJobPtrOutputWithContext(context.Context) WorkflowTemplateJobPysparkJobPtrOutput
}

WorkflowTemplateJobPysparkJobPtrInput is an input type that accepts WorkflowTemplateJobPysparkJobArgs, WorkflowTemplateJobPysparkJobPtr and WorkflowTemplateJobPysparkJobPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobPysparkJobPtrInput` via:

        WorkflowTemplateJobPysparkJobArgs{...}

or:

        nil

type WorkflowTemplateJobPysparkJobPtrOutput added in v5.2.0

type WorkflowTemplateJobPysparkJobPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobPysparkJobPtrOutput) ArchiveUris added in v5.2.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (WorkflowTemplateJobPysparkJobPtrOutput) Args added in v5.2.0

Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (WorkflowTemplateJobPysparkJobPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobPysparkJobPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobPysparkJobPtrOutput) FileUris added in v5.2.0

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (WorkflowTemplateJobPysparkJobPtrOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobPysparkJobPtrOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobPysparkJobPtrOutput) MainPythonFileUri added in v5.2.0

Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.

func (WorkflowTemplateJobPysparkJobPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobPysparkJobPtrOutput) PythonFileUris added in v5.2.0

Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.

func (WorkflowTemplateJobPysparkJobPtrOutput) ToWorkflowTemplateJobPysparkJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobPysparkJobPtrOutput) ToWorkflowTemplateJobPysparkJobPtrOutput() WorkflowTemplateJobPysparkJobPtrOutput

func (WorkflowTemplateJobPysparkJobPtrOutput) ToWorkflowTemplateJobPysparkJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobPysparkJobPtrOutput) ToWorkflowTemplateJobPysparkJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobPysparkJobPtrOutput

type WorkflowTemplateJobScheduling added in v5.2.0

type WorkflowTemplateJobScheduling struct {
	// Optional. Maximum number of times per hour a driver may be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job may be reported as thrashing if the driver exits with a non-zero code 4 times within a 10-minute window. Maximum value is 10.
	MaxFailuresPerHour *int `pulumi:"maxFailuresPerHour"`
	// Optional. Maximum number of times in total a driver may be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. Maximum value is 240.
	MaxFailuresTotal *int `pulumi:"maxFailuresTotal"`
}

type WorkflowTemplateJobSchedulingArgs added in v5.2.0

type WorkflowTemplateJobSchedulingArgs struct {
	// Optional. Maximum number of times per hour a driver may be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job may be reported as thrashing if the driver exits with a non-zero code 4 times within a 10-minute window. Maximum value is 10.
	MaxFailuresPerHour pulumi.IntPtrInput `pulumi:"maxFailuresPerHour"`
	// Optional. Maximum number of times in total a driver may be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. Maximum value is 240.
	MaxFailuresTotal pulumi.IntPtrInput `pulumi:"maxFailuresTotal"`
}

func (WorkflowTemplateJobSchedulingArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobSchedulingArgs) ToWorkflowTemplateJobSchedulingOutput added in v5.2.0

func (i WorkflowTemplateJobSchedulingArgs) ToWorkflowTemplateJobSchedulingOutput() WorkflowTemplateJobSchedulingOutput

func (WorkflowTemplateJobSchedulingArgs) ToWorkflowTemplateJobSchedulingOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSchedulingArgs) ToWorkflowTemplateJobSchedulingOutputWithContext(ctx context.Context) WorkflowTemplateJobSchedulingOutput

func (WorkflowTemplateJobSchedulingArgs) ToWorkflowTemplateJobSchedulingPtrOutput added in v5.2.0

func (i WorkflowTemplateJobSchedulingArgs) ToWorkflowTemplateJobSchedulingPtrOutput() WorkflowTemplateJobSchedulingPtrOutput

func (WorkflowTemplateJobSchedulingArgs) ToWorkflowTemplateJobSchedulingPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSchedulingArgs) ToWorkflowTemplateJobSchedulingPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSchedulingPtrOutput

type WorkflowTemplateJobSchedulingInput added in v5.2.0

type WorkflowTemplateJobSchedulingInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSchedulingOutput() WorkflowTemplateJobSchedulingOutput
	ToWorkflowTemplateJobSchedulingOutputWithContext(context.Context) WorkflowTemplateJobSchedulingOutput
}

WorkflowTemplateJobSchedulingInput is an input type that accepts WorkflowTemplateJobSchedulingArgs and WorkflowTemplateJobSchedulingOutput values. You can construct a concrete instance of `WorkflowTemplateJobSchedulingInput` via:

WorkflowTemplateJobSchedulingArgs{...}
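
A minimal sketch of a scheduling block; the retry limits shown are arbitrary values within the documented maximums and would typically be assigned to the `Scheduling` field of a workflow template job step.

```go
// Sketch only: the retry limits are arbitrary values within the documented maximums.
scheduling := &dataproc.WorkflowTemplateJobSchedulingArgs{
	MaxFailuresPerHour: pulumi.Int(5),
	MaxFailuresTotal:   pulumi.Int(20),
}
```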

type WorkflowTemplateJobSchedulingOutput added in v5.2.0

type WorkflowTemplateJobSchedulingOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSchedulingOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSchedulingOutput) MaxFailuresPerHour added in v5.2.0

Optional. Maximum number of times per hour a driver may be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job may be reported as thrashing if the driver exits with a non-zero code 4 times within a 10-minute window. Maximum value is 10.

func (WorkflowTemplateJobSchedulingOutput) MaxFailuresTotal added in v5.2.0

Optional. Maximum number of times in total a driver may be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. Maximum value is 240.

func (WorkflowTemplateJobSchedulingOutput) ToWorkflowTemplateJobSchedulingOutput added in v5.2.0

func (o WorkflowTemplateJobSchedulingOutput) ToWorkflowTemplateJobSchedulingOutput() WorkflowTemplateJobSchedulingOutput

func (WorkflowTemplateJobSchedulingOutput) ToWorkflowTemplateJobSchedulingOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSchedulingOutput) ToWorkflowTemplateJobSchedulingOutputWithContext(ctx context.Context) WorkflowTemplateJobSchedulingOutput

func (WorkflowTemplateJobSchedulingOutput) ToWorkflowTemplateJobSchedulingPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSchedulingOutput) ToWorkflowTemplateJobSchedulingPtrOutput() WorkflowTemplateJobSchedulingPtrOutput

func (WorkflowTemplateJobSchedulingOutput) ToWorkflowTemplateJobSchedulingPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSchedulingOutput) ToWorkflowTemplateJobSchedulingPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSchedulingPtrOutput

type WorkflowTemplateJobSchedulingPtrInput added in v5.2.0

type WorkflowTemplateJobSchedulingPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSchedulingPtrOutput() WorkflowTemplateJobSchedulingPtrOutput
	ToWorkflowTemplateJobSchedulingPtrOutputWithContext(context.Context) WorkflowTemplateJobSchedulingPtrOutput
}

WorkflowTemplateJobSchedulingPtrInput is an input type that accepts WorkflowTemplateJobSchedulingArgs, WorkflowTemplateJobSchedulingPtr and WorkflowTemplateJobSchedulingPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobSchedulingPtrInput` via:

        WorkflowTemplateJobSchedulingArgs{...}

or:

        nil

type WorkflowTemplateJobSchedulingPtrOutput added in v5.2.0

type WorkflowTemplateJobSchedulingPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSchedulingPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobSchedulingPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSchedulingPtrOutput) MaxFailuresPerHour added in v5.2.0

Optional. Maximum number of times per hour a driver may be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. A job may be reported as thrashing if the driver exits with a non-zero code 4 times within a 10-minute window. Maximum value is 10.

func (WorkflowTemplateJobSchedulingPtrOutput) MaxFailuresTotal added in v5.2.0

Optional. Maximum number of times in total a driver may be restarted as a result of the driver exiting with a non-zero code before the job is reported failed. Maximum value is 240.

func (WorkflowTemplateJobSchedulingPtrOutput) ToWorkflowTemplateJobSchedulingPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSchedulingPtrOutput) ToWorkflowTemplateJobSchedulingPtrOutput() WorkflowTemplateJobSchedulingPtrOutput

func (WorkflowTemplateJobSchedulingPtrOutput) ToWorkflowTemplateJobSchedulingPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSchedulingPtrOutput) ToWorkflowTemplateJobSchedulingPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSchedulingPtrOutput

type WorkflowTemplateJobSparkJob added in v5.2.0

type WorkflowTemplateJobSparkJob struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *WorkflowTemplateJobSparkJobLoggingConfig `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`.
	MainClass *string `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri *string `pulumi:"mainJarFileUri"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
}

type WorkflowTemplateJobSparkJobArgs added in v5.2.0

type WorkflowTemplateJobSparkJobArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig WorkflowTemplateJobSparkJobLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`.
	MainClass pulumi.StringPtrInput `pulumi:"mainClass"`
	// The HCFS URI of the jar file that contains the main class.
	MainJarFileUri pulumi.StringPtrInput `pulumi:"mainJarFileUri"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

func (WorkflowTemplateJobSparkJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkJobArgs) ToWorkflowTemplateJobSparkJobOutput added in v5.2.0

func (i WorkflowTemplateJobSparkJobArgs) ToWorkflowTemplateJobSparkJobOutput() WorkflowTemplateJobSparkJobOutput

func (WorkflowTemplateJobSparkJobArgs) ToWorkflowTemplateJobSparkJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkJobArgs) ToWorkflowTemplateJobSparkJobOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobOutput

func (WorkflowTemplateJobSparkJobArgs) ToWorkflowTemplateJobSparkJobPtrOutput added in v5.2.0

func (i WorkflowTemplateJobSparkJobArgs) ToWorkflowTemplateJobSparkJobPtrOutput() WorkflowTemplateJobSparkJobPtrOutput

func (WorkflowTemplateJobSparkJobArgs) ToWorkflowTemplateJobSparkJobPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkJobArgs) ToWorkflowTemplateJobSparkJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobPtrOutput

type WorkflowTemplateJobSparkJobInput added in v5.2.0

type WorkflowTemplateJobSparkJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkJobOutput() WorkflowTemplateJobSparkJobOutput
	ToWorkflowTemplateJobSparkJobOutputWithContext(context.Context) WorkflowTemplateJobSparkJobOutput
}

WorkflowTemplateJobSparkJobInput is an input type that accepts WorkflowTemplateJobSparkJobArgs and WorkflowTemplateJobSparkJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkJobInput` via:

WorkflowTemplateJobSparkJobArgs{...}
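
A hedged sketch of a Spark job definition; the class name, jar URI, and property value are placeholders. Per the field descriptions above, the driver is identified through either `MainClass` (with the jar listed in `JarFileUris`) or `MainJarFileUri`.

```go
// Sketch only: class name, jar URI, and property value are placeholders.
sparkJob := &dataproc.WorkflowTemplateJobSparkJobArgs{
	MainClass: pulumi.String("com.example.WordCount"),
	JarFileUris: pulumi.StringArray{
		pulumi.String("gs://my-bucket/jars/wordcount.jar"),
	},
	Properties: pulumi.StringMap{
		"spark:spark.executor.cores": pulumi.String("2"),
	},
}
```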

type WorkflowTemplateJobSparkJobLoggingConfig added in v5.2.0

type WorkflowTemplateJobSparkJobLoggingConfig struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type WorkflowTemplateJobSparkJobLoggingConfigArgs added in v5.2.0

type WorkflowTemplateJobSparkJobLoggingConfigArgs struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (WorkflowTemplateJobSparkJobLoggingConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkJobLoggingConfigArgs) ToWorkflowTemplateJobSparkJobLoggingConfigOutput added in v5.2.0

func (i WorkflowTemplateJobSparkJobLoggingConfigArgs) ToWorkflowTemplateJobSparkJobLoggingConfigOutput() WorkflowTemplateJobSparkJobLoggingConfigOutput

func (WorkflowTemplateJobSparkJobLoggingConfigArgs) ToWorkflowTemplateJobSparkJobLoggingConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkJobLoggingConfigArgs) ToWorkflowTemplateJobSparkJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobLoggingConfigOutput

func (WorkflowTemplateJobSparkJobLoggingConfigArgs) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutput added in v5.2.0

func (i WorkflowTemplateJobSparkJobLoggingConfigArgs) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkJobLoggingConfigPtrOutput

func (WorkflowTemplateJobSparkJobLoggingConfigArgs) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkJobLoggingConfigArgs) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkJobLoggingConfigInput added in v5.2.0

type WorkflowTemplateJobSparkJobLoggingConfigInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkJobLoggingConfigOutput() WorkflowTemplateJobSparkJobLoggingConfigOutput
	ToWorkflowTemplateJobSparkJobLoggingConfigOutputWithContext(context.Context) WorkflowTemplateJobSparkJobLoggingConfigOutput
}

WorkflowTemplateJobSparkJobLoggingConfigInput is an input type that accepts WorkflowTemplateJobSparkJobLoggingConfigArgs and WorkflowTemplateJobSparkJobLoggingConfigOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkJobLoggingConfigInput` via:

WorkflowTemplateJobSparkJobLoggingConfigArgs{...}

type WorkflowTemplateJobSparkJobLoggingConfigOutput added in v5.2.0

type WorkflowTemplateJobSparkJobLoggingConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkJobLoggingConfigOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobSparkJobLoggingConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkJobLoggingConfigOutput) ToWorkflowTemplateJobSparkJobLoggingConfigOutput added in v5.2.0

func (o WorkflowTemplateJobSparkJobLoggingConfigOutput) ToWorkflowTemplateJobSparkJobLoggingConfigOutput() WorkflowTemplateJobSparkJobLoggingConfigOutput

func (WorkflowTemplateJobSparkJobLoggingConfigOutput) ToWorkflowTemplateJobSparkJobLoggingConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkJobLoggingConfigOutput) ToWorkflowTemplateJobSparkJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobLoggingConfigOutput

func (WorkflowTemplateJobSparkJobLoggingConfigOutput) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkJobLoggingConfigOutput) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkJobLoggingConfigPtrOutput

func (WorkflowTemplateJobSparkJobLoggingConfigOutput) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkJobLoggingConfigOutput) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkJobLoggingConfigPtrInput added in v5.2.0

type WorkflowTemplateJobSparkJobLoggingConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkJobLoggingConfigPtrOutput
	ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutputWithContext(context.Context) WorkflowTemplateJobSparkJobLoggingConfigPtrOutput
}

WorkflowTemplateJobSparkJobLoggingConfigPtrInput is an input type that accepts WorkflowTemplateJobSparkJobLoggingConfigArgs, WorkflowTemplateJobSparkJobLoggingConfigPtr and WorkflowTemplateJobSparkJobLoggingConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkJobLoggingConfigPtrInput` via:

        WorkflowTemplateJobSparkJobLoggingConfigArgs{...}

or:

        nil

type WorkflowTemplateJobSparkJobLoggingConfigPtrOutput added in v5.2.0

type WorkflowTemplateJobSparkJobLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkJobLoggingConfigPtrOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobSparkJobLoggingConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobSparkJobLoggingConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkJobLoggingConfigPtrOutput

func (WorkflowTemplateJobSparkJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkJobOutput added in v5.2.0

type WorkflowTemplateJobSparkJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkJobOutput) ArchiveUris added in v5.2.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (WorkflowTemplateJobSparkJobOutput) Args added in v5.2.0

Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (WorkflowTemplateJobSparkJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkJobOutput) FileUris added in v5.2.0

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (WorkflowTemplateJobSparkJobOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobSparkJobOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobSparkJobOutput) MainClass added in v5.2.0

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`.

func (WorkflowTemplateJobSparkJobOutput) MainJarFileUri added in v5.2.0

The HCFS URI of the jar file that contains the main class.

func (WorkflowTemplateJobSparkJobOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobSparkJobOutput) ToWorkflowTemplateJobSparkJobOutput added in v5.2.0

func (o WorkflowTemplateJobSparkJobOutput) ToWorkflowTemplateJobSparkJobOutput() WorkflowTemplateJobSparkJobOutput

func (WorkflowTemplateJobSparkJobOutput) ToWorkflowTemplateJobSparkJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkJobOutput) ToWorkflowTemplateJobSparkJobOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobOutput

func (WorkflowTemplateJobSparkJobOutput) ToWorkflowTemplateJobSparkJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkJobOutput) ToWorkflowTemplateJobSparkJobPtrOutput() WorkflowTemplateJobSparkJobPtrOutput

func (WorkflowTemplateJobSparkJobOutput) ToWorkflowTemplateJobSparkJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkJobOutput) ToWorkflowTemplateJobSparkJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobPtrOutput

type WorkflowTemplateJobSparkJobPtrInput added in v5.2.0

type WorkflowTemplateJobSparkJobPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkJobPtrOutput() WorkflowTemplateJobSparkJobPtrOutput
	ToWorkflowTemplateJobSparkJobPtrOutputWithContext(context.Context) WorkflowTemplateJobSparkJobPtrOutput
}

WorkflowTemplateJobSparkJobPtrInput is an input type that accepts WorkflowTemplateJobSparkJobArgs, WorkflowTemplateJobSparkJobPtr and WorkflowTemplateJobSparkJobPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkJobPtrInput` via:

        WorkflowTemplateJobSparkJobArgs{...}

or:

        nil

func WorkflowTemplateJobSparkJobPtr added in v5.2.0

func WorkflowTemplateJobSparkJobPtr(v *WorkflowTemplateJobSparkJobArgs) WorkflowTemplateJobSparkJobPtrInput

type WorkflowTemplateJobSparkJobPtrOutput added in v5.2.0

type WorkflowTemplateJobSparkJobPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkJobPtrOutput) ArchiveUris added in v5.2.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (WorkflowTemplateJobSparkJobPtrOutput) Args added in v5.2.0

Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (WorkflowTemplateJobSparkJobPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobSparkJobPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkJobPtrOutput) FileUris added in v5.2.0

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (WorkflowTemplateJobSparkJobPtrOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobSparkJobPtrOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobSparkJobPtrOutput) MainClass added in v5.2.0

The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jarFileUris`.

func (WorkflowTemplateJobSparkJobPtrOutput) MainJarFileUri added in v5.2.0

The HCFS URI of the jar file that contains the main class.

func (WorkflowTemplateJobSparkJobPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobSparkJobPtrOutput) ToWorkflowTemplateJobSparkJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkJobPtrOutput) ToWorkflowTemplateJobSparkJobPtrOutput() WorkflowTemplateJobSparkJobPtrOutput

func (WorkflowTemplateJobSparkJobPtrOutput) ToWorkflowTemplateJobSparkJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkJobPtrOutput) ToWorkflowTemplateJobSparkJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkJobPtrOutput

type WorkflowTemplateJobSparkRJob added in v5.2.0

type WorkflowTemplateJobSparkRJob struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args []string `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris []string `pulumi:"fileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *WorkflowTemplateJobSparkRJobLoggingConfig `pulumi:"loggingConfig"`
	// Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
	MainRFileUri string `pulumi:"mainRFileUri"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
}

type WorkflowTemplateJobSparkRJobArgs added in v5.2.0

type WorkflowTemplateJobSparkRJobArgs struct {
	// Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris pulumi.StringArrayInput `pulumi:"archiveUris"`
	// Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
	Args pulumi.StringArrayInput `pulumi:"args"`
	// Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.
	FileUris pulumi.StringArrayInput `pulumi:"fileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig WorkflowTemplateJobSparkRJobLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.
	MainRFileUri pulumi.StringInput `pulumi:"mainRFileUri"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties pulumi.StringMapInput `pulumi:"properties"`
}

func (WorkflowTemplateJobSparkRJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkRJobArgs) ToWorkflowTemplateJobSparkRJobOutput added in v5.2.0

func (i WorkflowTemplateJobSparkRJobArgs) ToWorkflowTemplateJobSparkRJobOutput() WorkflowTemplateJobSparkRJobOutput

func (WorkflowTemplateJobSparkRJobArgs) ToWorkflowTemplateJobSparkRJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkRJobArgs) ToWorkflowTemplateJobSparkRJobOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobOutput

func (WorkflowTemplateJobSparkRJobArgs) ToWorkflowTemplateJobSparkRJobPtrOutput added in v5.2.0

func (i WorkflowTemplateJobSparkRJobArgs) ToWorkflowTemplateJobSparkRJobPtrOutput() WorkflowTemplateJobSparkRJobPtrOutput

func (WorkflowTemplateJobSparkRJobArgs) ToWorkflowTemplateJobSparkRJobPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkRJobArgs) ToWorkflowTemplateJobSparkRJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobPtrOutput

type WorkflowTemplateJobSparkRJobInput added in v5.2.0

type WorkflowTemplateJobSparkRJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkRJobOutput() WorkflowTemplateJobSparkRJobOutput
	ToWorkflowTemplateJobSparkRJobOutputWithContext(context.Context) WorkflowTemplateJobSparkRJobOutput
}

WorkflowTemplateJobSparkRJobInput is an input type that accepts WorkflowTemplateJobSparkRJobArgs and WorkflowTemplateJobSparkRJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkRJobInput` via:

WorkflowTemplateJobSparkRJobArgs{...}
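
A minimal sketch of a SparkR job definition; the .R file and data URIs are placeholders.

```go
// Sketch only: the .R file and data URIs are placeholders.
sparkRJob := &dataproc.WorkflowTemplateJobSparkRJobArgs{
	MainRFileUri: pulumi.String("gs://my-bucket/jobs/analysis.R"),
	Args: pulumi.StringArray{
		pulumi.String("gs://my-bucket/data/input.csv"),
	},
}
```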

type WorkflowTemplateJobSparkRJobLoggingConfig added in v5.2.0

type WorkflowTemplateJobSparkRJobLoggingConfig struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}

type WorkflowTemplateJobSparkRJobLoggingConfigArgs added in v5.2.0

type WorkflowTemplateJobSparkRJobLoggingConfigArgs struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (WorkflowTemplateJobSparkRJobLoggingConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkRJobLoggingConfigArgs) ToWorkflowTemplateJobSparkRJobLoggingConfigOutput added in v5.2.0

func (i WorkflowTemplateJobSparkRJobLoggingConfigArgs) ToWorkflowTemplateJobSparkRJobLoggingConfigOutput() WorkflowTemplateJobSparkRJobLoggingConfigOutput

func (WorkflowTemplateJobSparkRJobLoggingConfigArgs) ToWorkflowTemplateJobSparkRJobLoggingConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkRJobLoggingConfigArgs) ToWorkflowTemplateJobSparkRJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobLoggingConfigOutput

func (WorkflowTemplateJobSparkRJobLoggingConfigArgs) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutput added in v5.2.0

func (i WorkflowTemplateJobSparkRJobLoggingConfigArgs) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput

func (WorkflowTemplateJobSparkRJobLoggingConfigArgs) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkRJobLoggingConfigArgs) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkRJobLoggingConfigInput added in v5.2.0

type WorkflowTemplateJobSparkRJobLoggingConfigInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkRJobLoggingConfigOutput() WorkflowTemplateJobSparkRJobLoggingConfigOutput
	ToWorkflowTemplateJobSparkRJobLoggingConfigOutputWithContext(context.Context) WorkflowTemplateJobSparkRJobLoggingConfigOutput
}

WorkflowTemplateJobSparkRJobLoggingConfigInput is an input type that accepts WorkflowTemplateJobSparkRJobLoggingConfigArgs and WorkflowTemplateJobSparkRJobLoggingConfigOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkRJobLoggingConfigInput` via:

WorkflowTemplateJobSparkRJobLoggingConfigArgs{...}

type WorkflowTemplateJobSparkRJobLoggingConfigOutput added in v5.2.0

type WorkflowTemplateJobSparkRJobLoggingConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkRJobLoggingConfigOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobSparkRJobLoggingConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkRJobLoggingConfigOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigOutput added in v5.2.0

func (o WorkflowTemplateJobSparkRJobLoggingConfigOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigOutput() WorkflowTemplateJobSparkRJobLoggingConfigOutput

func (WorkflowTemplateJobSparkRJobLoggingConfigOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkRJobLoggingConfigOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobLoggingConfigOutput

func (WorkflowTemplateJobSparkRJobLoggingConfigOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkRJobLoggingConfigOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput

func (WorkflowTemplateJobSparkRJobLoggingConfigOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkRJobLoggingConfigOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkRJobLoggingConfigPtrInput added in v5.2.0

type WorkflowTemplateJobSparkRJobLoggingConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput
	ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutputWithContext(context.Context) WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput
}

WorkflowTemplateJobSparkRJobLoggingConfigPtrInput is an input type that accepts WorkflowTemplateJobSparkRJobLoggingConfigArgs, WorkflowTemplateJobSparkRJobLoggingConfigPtr and WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkRJobLoggingConfigPtrInput` via:

        WorkflowTemplateJobSparkRJobLoggingConfigArgs{...}

or:

        nil

type WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput added in v5.2.0

type WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput

func (WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkRJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkRJobOutput added in v5.2.0

type WorkflowTemplateJobSparkRJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkRJobOutput) ArchiveUris added in v5.2.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (WorkflowTemplateJobSparkRJobOutput) Args added in v5.2.0

Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (WorkflowTemplateJobSparkRJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkRJobOutput) FileUris added in v5.2.0

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (WorkflowTemplateJobSparkRJobOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobSparkRJobOutput) MainRFileUri added in v5.2.0

Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.

func (WorkflowTemplateJobSparkRJobOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobSparkRJobOutput) ToWorkflowTemplateJobSparkRJobOutput added in v5.2.0

func (o WorkflowTemplateJobSparkRJobOutput) ToWorkflowTemplateJobSparkRJobOutput() WorkflowTemplateJobSparkRJobOutput

func (WorkflowTemplateJobSparkRJobOutput) ToWorkflowTemplateJobSparkRJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkRJobOutput) ToWorkflowTemplateJobSparkRJobOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobOutput

func (WorkflowTemplateJobSparkRJobOutput) ToWorkflowTemplateJobSparkRJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkRJobOutput) ToWorkflowTemplateJobSparkRJobPtrOutput() WorkflowTemplateJobSparkRJobPtrOutput

func (WorkflowTemplateJobSparkRJobOutput) ToWorkflowTemplateJobSparkRJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkRJobOutput) ToWorkflowTemplateJobSparkRJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobPtrOutput

type WorkflowTemplateJobSparkRJobPtrInput added in v5.2.0

type WorkflowTemplateJobSparkRJobPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkRJobPtrOutput() WorkflowTemplateJobSparkRJobPtrOutput
	ToWorkflowTemplateJobSparkRJobPtrOutputWithContext(context.Context) WorkflowTemplateJobSparkRJobPtrOutput
}

WorkflowTemplateJobSparkRJobPtrInput is an input type that accepts WorkflowTemplateJobSparkRJobArgs, WorkflowTemplateJobSparkRJobPtr and WorkflowTemplateJobSparkRJobPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkRJobPtrInput` via:

        WorkflowTemplateJobSparkRJobArgs{...}

or:

        nil

type WorkflowTemplateJobSparkRJobPtrOutput added in v5.2.0

type WorkflowTemplateJobSparkRJobPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkRJobPtrOutput) ArchiveUris added in v5.2.0

Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.

func (WorkflowTemplateJobSparkRJobPtrOutput) Args added in v5.2.0

Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.

func (WorkflowTemplateJobSparkRJobPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobSparkRJobPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkRJobPtrOutput) FileUris added in v5.2.0

Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.

func (WorkflowTemplateJobSparkRJobPtrOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobSparkRJobPtrOutput) MainRFileUri added in v5.2.0

Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.

func (WorkflowTemplateJobSparkRJobPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).

func (WorkflowTemplateJobSparkRJobPtrOutput) ToWorkflowTemplateJobSparkRJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkRJobPtrOutput) ToWorkflowTemplateJobSparkRJobPtrOutput() WorkflowTemplateJobSparkRJobPtrOutput

func (WorkflowTemplateJobSparkRJobPtrOutput) ToWorkflowTemplateJobSparkRJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkRJobPtrOutput) ToWorkflowTemplateJobSparkRJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkRJobPtrOutput

type WorkflowTemplateJobSparkSqlJob added in v5.2.0

type WorkflowTemplateJobSparkSqlJob struct {
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig *WorkflowTemplateJobSparkSqlJobLoggingConfig `pulumi:"loggingConfig"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri *string `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList *WorkflowTemplateJobSparkSqlJobQueryList `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
	ScriptVariables map[string]string `pulumi:"scriptVariables"`
}
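
As a sketch of how these fields compose inside a template, the program below defines a workflow template whose single step runs an inline Spark SQL query list against an existing cluster chosen by label. The template name, step id, queries, script variables, and property values are illustrative, not prescribed by this package.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataproc.NewWorkflowTemplate(ctx, "sparkSqlTemplate", &dataproc.WorkflowTemplateArgs{
			Location: pulumi.String("us-central1"),
			Jobs: dataproc.WorkflowTemplateJobArray{
				&dataproc.WorkflowTemplateJobArgs{
					StepId: pulumi.String("run-spark-sql"),
					SparkSqlJob: &dataproc.WorkflowTemplateJobSparkSqlJobArgs{
						QueryList: &dataproc.WorkflowTemplateJobSparkSqlJobQueryListArgs{
							Queries: pulumi.StringArray{
								pulumi.String("CREATE DATABASE IF NOT EXISTS reports"),
								pulumi.String("SELECT * FROM reports.daily LIMIT 10"),
							},
						},
						// Equivalent to `SET env="prod";` before the queries run.
						ScriptVariables: pulumi.StringMap{
							"env": pulumi.String("prod"),
						},
						// Keys use the prefix:property form (spark -> spark-defaults.conf).
						Properties: pulumi.StringMap{
							"spark:spark.executor.memory": pulumi.String("4g"),
						},
					},
				},
			},
			// Run the steps on an existing cluster selected by label.
			Placement: &dataproc.WorkflowTemplatePlacementArgs{
				ClusterSelector: &dataproc.WorkflowTemplatePlacementClusterSelectorArgs{
					ClusterLabels: pulumi.StringMap{
						"env": pulumi.String("prod"),
					},
				},
			},
		})
		return err
	})
}
```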

type WorkflowTemplateJobSparkSqlJobArgs added in v5.2.0

type WorkflowTemplateJobSparkSqlJobArgs struct {
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris pulumi.StringArrayInput `pulumi:"jarFileUris"`
	// Optional. The runtime log config for job execution.
	LoggingConfig WorkflowTemplateJobSparkSqlJobLoggingConfigPtrInput `pulumi:"loggingConfig"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml`. For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties pulumi.StringMapInput `pulumi:"properties"`
	// The HCFS URI of the script that contains SQL queries.
	QueryFileUri pulumi.StringPtrInput `pulumi:"queryFileUri"`
	// A list of queries.
	QueryList WorkflowTemplateJobSparkSqlJobQueryListPtrInput `pulumi:"queryList"`
	// Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
	ScriptVariables pulumi.StringMapInput `pulumi:"scriptVariables"`
}

func (WorkflowTemplateJobSparkSqlJobArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobArgs) ToWorkflowTemplateJobSparkSqlJobOutput added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobArgs) ToWorkflowTemplateJobSparkSqlJobOutput() WorkflowTemplateJobSparkSqlJobOutput

func (WorkflowTemplateJobSparkSqlJobArgs) ToWorkflowTemplateJobSparkSqlJobOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobArgs) ToWorkflowTemplateJobSparkSqlJobOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobOutput

func (WorkflowTemplateJobSparkSqlJobArgs) ToWorkflowTemplateJobSparkSqlJobPtrOutput added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobArgs) ToWorkflowTemplateJobSparkSqlJobPtrOutput() WorkflowTemplateJobSparkSqlJobPtrOutput

func (WorkflowTemplateJobSparkSqlJobArgs) ToWorkflowTemplateJobSparkSqlJobPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobArgs) ToWorkflowTemplateJobSparkSqlJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobPtrOutput

type WorkflowTemplateJobSparkSqlJobInput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkSqlJobOutput() WorkflowTemplateJobSparkSqlJobOutput
	ToWorkflowTemplateJobSparkSqlJobOutputWithContext(context.Context) WorkflowTemplateJobSparkSqlJobOutput
}

WorkflowTemplateJobSparkSqlJobInput is an input type that accepts WorkflowTemplateJobSparkSqlJobArgs and WorkflowTemplateJobSparkSqlJobOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkSqlJobInput` via:

WorkflowTemplateJobSparkSqlJobArgs{...}

type WorkflowTemplateJobSparkSqlJobLoggingConfig added in v5.2.0

type WorkflowTemplateJobSparkSqlJobLoggingConfig struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels map[string]string `pulumi:"driverLogLevels"`
}
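
A small illustrative fragment showing how this block is attached to a Spark SQL job step: the map keys are logger (package) names and the values are level names, both chosen here only as examples. The fragment slots into the LoggingConfig field of WorkflowTemplateJobSparkSqlJobArgs.

```go
LoggingConfig: &dataproc.WorkflowTemplateJobSparkSqlJobLoggingConfigArgs{
	DriverLogLevels: pulumi.StringMap{
		"root":       pulumi.String("INFO"),  // rootLogger
		"org.apache": pulumi.String("DEBUG"), // per-package override
	},
},
```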

type WorkflowTemplateJobSparkSqlJobLoggingConfigArgs added in v5.2.0

type WorkflowTemplateJobSparkSqlJobLoggingConfigArgs struct {
	// The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels pulumi.StringMapInput `pulumi:"driverLogLevels"`
}

func (WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutput added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutput() WorkflowTemplateJobSparkSqlJobLoggingConfigOutput

func (WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobLoggingConfigOutput

func (WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput

func (WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobLoggingConfigArgs) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkSqlJobLoggingConfigInput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobLoggingConfigInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutput() WorkflowTemplateJobSparkSqlJobLoggingConfigOutput
	ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutputWithContext(context.Context) WorkflowTemplateJobSparkSqlJobLoggingConfigOutput
}

WorkflowTemplateJobSparkSqlJobLoggingConfigInput is an input type that accepts WorkflowTemplateJobSparkSqlJobLoggingConfigArgs and WorkflowTemplateJobSparkSqlJobLoggingConfigOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkSqlJobLoggingConfigInput` via:

WorkflowTemplateJobSparkSqlJobLoggingConfigArgs{...}

type WorkflowTemplateJobSparkSqlJobLoggingConfigOutput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobLoggingConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutput added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutput() WorkflowTemplateJobSparkSqlJobLoggingConfigOutput

func (WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobLoggingConfigOutput

func (WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput

func (WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobLoggingConfigOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkSqlJobLoggingConfigPtrInput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobLoggingConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput() WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput
	ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutputWithContext(context.Context) WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput
}

WorkflowTemplateJobSparkSqlJobLoggingConfigPtrInput is an input type that accepts WorkflowTemplateJobSparkSqlJobLoggingConfigArgs, WorkflowTemplateJobSparkSqlJobLoggingConfigPtr and WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkSqlJobLoggingConfigPtrInput` via:

        WorkflowTemplateJobSparkSqlJobLoggingConfigArgs{...}

or:

        nil
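
Concretely, the two accepted forms look like the following fragment (field values illustrative); passing nil simply leaves the optional block unset.

```go
// The Args value satisfies the PtrInput interface directly.
LoggingConfig: dataproc.WorkflowTemplateJobSparkSqlJobLoggingConfigArgs{
	DriverLogLevels: pulumi.StringMap{"root": pulumi.String("INFO")},
},

// Or omit the optional block entirely.
LoggingConfig: nil,
```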

type WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput) DriverLogLevels added in v5.2.0

The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'

func (WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput) ToWorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobLoggingConfigPtrOutput

type WorkflowTemplateJobSparkSqlJobOutput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkSqlJobOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobSparkSqlJobOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobSparkSqlJobOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml`. For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.

func (WorkflowTemplateJobSparkSqlJobOutput) QueryFileUri added in v5.2.0

The HCFS URI of the script that contains SQL queries.

func (WorkflowTemplateJobSparkSqlJobOutput) QueryList added in v5.2.0

A list of queries.

func (WorkflowTemplateJobSparkSqlJobOutput) ScriptVariables added in v5.2.0

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).

func (WorkflowTemplateJobSparkSqlJobOutput) ToWorkflowTemplateJobSparkSqlJobOutput added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobOutput) ToWorkflowTemplateJobSparkSqlJobOutput() WorkflowTemplateJobSparkSqlJobOutput

func (WorkflowTemplateJobSparkSqlJobOutput) ToWorkflowTemplateJobSparkSqlJobOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobOutput) ToWorkflowTemplateJobSparkSqlJobOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobOutput

func (WorkflowTemplateJobSparkSqlJobOutput) ToWorkflowTemplateJobSparkSqlJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobOutput) ToWorkflowTemplateJobSparkSqlJobPtrOutput() WorkflowTemplateJobSparkSqlJobPtrOutput

func (WorkflowTemplateJobSparkSqlJobOutput) ToWorkflowTemplateJobSparkSqlJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobOutput) ToWorkflowTemplateJobSparkSqlJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobPtrOutput

type WorkflowTemplateJobSparkSqlJobPtrInput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkSqlJobPtrOutput() WorkflowTemplateJobSparkSqlJobPtrOutput
	ToWorkflowTemplateJobSparkSqlJobPtrOutputWithContext(context.Context) WorkflowTemplateJobSparkSqlJobPtrOutput
}

WorkflowTemplateJobSparkSqlJobPtrInput is an input type that accepts WorkflowTemplateJobSparkSqlJobArgs, WorkflowTemplateJobSparkSqlJobPtr and WorkflowTemplateJobSparkSqlJobPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkSqlJobPtrInput` via:

        WorkflowTemplateJobSparkSqlJobArgs{...}

or:

        nil

type WorkflowTemplateJobSparkSqlJobPtrOutput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkSqlJobPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobPtrOutput) JarFileUris added in v5.2.0

Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.

func (WorkflowTemplateJobSparkSqlJobPtrOutput) LoggingConfig added in v5.2.0

Optional. The runtime log config for job execution.

func (WorkflowTemplateJobSparkSqlJobPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml`. For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.

func (WorkflowTemplateJobSparkSqlJobPtrOutput) QueryFileUri added in v5.2.0

The HCFS URI of the script that contains SQL queries.

func (WorkflowTemplateJobSparkSqlJobPtrOutput) QueryList added in v5.2.0

A list of queries.

func (WorkflowTemplateJobSparkSqlJobPtrOutput) ScriptVariables added in v5.2.0

Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).

func (WorkflowTemplateJobSparkSqlJobPtrOutput) ToWorkflowTemplateJobSparkSqlJobPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobPtrOutput) ToWorkflowTemplateJobSparkSqlJobPtrOutput() WorkflowTemplateJobSparkSqlJobPtrOutput

func (WorkflowTemplateJobSparkSqlJobPtrOutput) ToWorkflowTemplateJobSparkSqlJobPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobPtrOutput) ToWorkflowTemplateJobSparkSqlJobPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobPtrOutput

type WorkflowTemplateJobSparkSqlJobQueryList added in v5.2.0

type WorkflowTemplateJobSparkSqlJobQueryList struct {
	// Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
	Queries []string `pulumi:"queries"`
}
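
As an illustrative fragment, statements can be split one per entry or grouped into a single entry with semicolons; this slots into the QueryList field of WorkflowTemplateJobSparkSqlJobArgs, and the SQL itself is only an example.

```go
QueryList: &dataproc.WorkflowTemplateJobSparkSqlJobQueryListArgs{
	Queries: pulumi.StringArray{
		// One statement per entry; no trailing semicolon required.
		pulumi.String("DROP TABLE IF EXISTS staging.events"),
		// Or several statements in one entry, separated by semicolons.
		pulumi.String("CREATE TABLE staging.events (id BIGINT); ALTER TABLE staging.events ADD COLUMNS (ts TIMESTAMP)"),
	},
},
```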

type WorkflowTemplateJobSparkSqlJobQueryListArgs added in v5.2.0

type WorkflowTemplateJobSparkSqlJobQueryListArgs struct {
	// Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }
	Queries pulumi.StringArrayInput `pulumi:"queries"`
}

func (WorkflowTemplateJobSparkSqlJobQueryListArgs) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobQueryListArgs) ToWorkflowTemplateJobSparkSqlJobQueryListOutput added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobQueryListArgs) ToWorkflowTemplateJobSparkSqlJobQueryListOutput() WorkflowTemplateJobSparkSqlJobQueryListOutput

func (WorkflowTemplateJobSparkSqlJobQueryListArgs) ToWorkflowTemplateJobSparkSqlJobQueryListOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobQueryListArgs) ToWorkflowTemplateJobSparkSqlJobQueryListOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobQueryListOutput

func (WorkflowTemplateJobSparkSqlJobQueryListArgs) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutput added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobQueryListArgs) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutput() WorkflowTemplateJobSparkSqlJobQueryListPtrOutput

func (WorkflowTemplateJobSparkSqlJobQueryListArgs) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateJobSparkSqlJobQueryListArgs) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobQueryListPtrOutput

type WorkflowTemplateJobSparkSqlJobQueryListInput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobQueryListInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkSqlJobQueryListOutput() WorkflowTemplateJobSparkSqlJobQueryListOutput
	ToWorkflowTemplateJobSparkSqlJobQueryListOutputWithContext(context.Context) WorkflowTemplateJobSparkSqlJobQueryListOutput
}

WorkflowTemplateJobSparkSqlJobQueryListInput is an input type that accepts WorkflowTemplateJobSparkSqlJobQueryListArgs and WorkflowTemplateJobSparkSqlJobQueryListOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkSqlJobQueryListInput` via:

WorkflowTemplateJobSparkSqlJobQueryListArgs{...}

type WorkflowTemplateJobSparkSqlJobQueryListOutput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobQueryListOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkSqlJobQueryListOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobQueryListOutput) Queries added in v5.2.0

Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

func (WorkflowTemplateJobSparkSqlJobQueryListOutput) ToWorkflowTemplateJobSparkSqlJobQueryListOutput added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobQueryListOutput) ToWorkflowTemplateJobSparkSqlJobQueryListOutput() WorkflowTemplateJobSparkSqlJobQueryListOutput

func (WorkflowTemplateJobSparkSqlJobQueryListOutput) ToWorkflowTemplateJobSparkSqlJobQueryListOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobQueryListOutput) ToWorkflowTemplateJobSparkSqlJobQueryListOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobQueryListOutput

func (WorkflowTemplateJobSparkSqlJobQueryListOutput) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobQueryListOutput) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutput() WorkflowTemplateJobSparkSqlJobQueryListPtrOutput

func (WorkflowTemplateJobSparkSqlJobQueryListOutput) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobQueryListOutput) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobQueryListPtrOutput

type WorkflowTemplateJobSparkSqlJobQueryListPtrInput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobQueryListPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutput() WorkflowTemplateJobSparkSqlJobQueryListPtrOutput
	ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutputWithContext(context.Context) WorkflowTemplateJobSparkSqlJobQueryListPtrOutput
}

WorkflowTemplateJobSparkSqlJobQueryListPtrInput is an input type that accepts WorkflowTemplateJobSparkSqlJobQueryListArgs, WorkflowTemplateJobSparkSqlJobQueryListPtr and WorkflowTemplateJobSparkSqlJobQueryListPtrOutput values. You can construct a concrete instance of `WorkflowTemplateJobSparkSqlJobQueryListPtrInput` via:

        WorkflowTemplateJobSparkSqlJobQueryListArgs{...}

or:

        nil

type WorkflowTemplateJobSparkSqlJobQueryListPtrOutput added in v5.2.0

type WorkflowTemplateJobSparkSqlJobQueryListPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateJobSparkSqlJobQueryListPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobQueryListPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateJobSparkSqlJobQueryListPtrOutput) Queries added in v5.2.0

Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": [ "query1", "query2", "query3;query4" ] } }

func (WorkflowTemplateJobSparkSqlJobQueryListPtrOutput) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutput added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobQueryListPtrOutput) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutput() WorkflowTemplateJobSparkSqlJobQueryListPtrOutput

func (WorkflowTemplateJobSparkSqlJobQueryListPtrOutput) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateJobSparkSqlJobQueryListPtrOutput) ToWorkflowTemplateJobSparkSqlJobQueryListPtrOutputWithContext(ctx context.Context) WorkflowTemplateJobSparkSqlJobQueryListPtrOutput

type WorkflowTemplateMap added in v5.2.0

type WorkflowTemplateMap map[string]WorkflowTemplateInput

func (WorkflowTemplateMap) ElementType added in v5.2.0

func (WorkflowTemplateMap) ElementType() reflect.Type

func (WorkflowTemplateMap) ToWorkflowTemplateMapOutput added in v5.2.0

func (i WorkflowTemplateMap) ToWorkflowTemplateMapOutput() WorkflowTemplateMapOutput

func (WorkflowTemplateMap) ToWorkflowTemplateMapOutputWithContext added in v5.2.0

func (i WorkflowTemplateMap) ToWorkflowTemplateMapOutputWithContext(ctx context.Context) WorkflowTemplateMapOutput

type WorkflowTemplateMapInput added in v5.2.0

type WorkflowTemplateMapInput interface {
	pulumi.Input

	ToWorkflowTemplateMapOutput() WorkflowTemplateMapOutput
	ToWorkflowTemplateMapOutputWithContext(context.Context) WorkflowTemplateMapOutput
}

WorkflowTemplateMapInput is an input type that accepts WorkflowTemplateMap and WorkflowTemplateMapOutput values. You can construct a concrete instance of `WorkflowTemplateMapInput` via:

WorkflowTemplateMap{ "key": WorkflowTemplateArgs{...} }

type WorkflowTemplateMapOutput added in v5.2.0

type WorkflowTemplateMapOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateMapOutput) ElementType added in v5.2.0

func (WorkflowTemplateMapOutput) ElementType() reflect.Type

func (WorkflowTemplateMapOutput) MapIndex added in v5.2.0

func (WorkflowTemplateMapOutput) ToWorkflowTemplateMapOutput added in v5.2.0

func (o WorkflowTemplateMapOutput) ToWorkflowTemplateMapOutput() WorkflowTemplateMapOutput

func (WorkflowTemplateMapOutput) ToWorkflowTemplateMapOutputWithContext added in v5.2.0

func (o WorkflowTemplateMapOutput) ToWorkflowTemplateMapOutputWithContext(ctx context.Context) WorkflowTemplateMapOutput

type WorkflowTemplateOutput added in v5.2.0

type WorkflowTemplateOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateOutput) ElementType added in v5.2.0

func (WorkflowTemplateOutput) ElementType() reflect.Type

func (WorkflowTemplateOutput) ToWorkflowTemplateOutput added in v5.2.0

func (o WorkflowTemplateOutput) ToWorkflowTemplateOutput() WorkflowTemplateOutput

func (WorkflowTemplateOutput) ToWorkflowTemplateOutputWithContext added in v5.2.0

func (o WorkflowTemplateOutput) ToWorkflowTemplateOutputWithContext(ctx context.Context) WorkflowTemplateOutput

func (WorkflowTemplateOutput) ToWorkflowTemplatePtrOutput added in v5.2.0

func (o WorkflowTemplateOutput) ToWorkflowTemplatePtrOutput() WorkflowTemplatePtrOutput

func (WorkflowTemplateOutput) ToWorkflowTemplatePtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateOutput) ToWorkflowTemplatePtrOutputWithContext(ctx context.Context) WorkflowTemplatePtrOutput

type WorkflowTemplateParameter added in v5.2.0

type WorkflowTemplateParameter struct {
	// Optional. Brief description of the parameter. Must not exceed 1024 characters.
	Description *string `pulumi:"description"`
	// Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask; for example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`.
	Fields []string `pulumi:"fields"`
	// Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
	Name string `pulumi:"name"`
	// Optional. Validation rules to be applied to this parameter's value.
	Validation *WorkflowTemplateParameterValidation `pulumi:"validation"`
}
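
A sketch of a parameterized field: the fragment below slots into the Parameters field of WorkflowTemplateArgs and exposes the cluster selector's zone under the name ZONE. The name, description, and field path are illustrative, and the value itself is supplied when the template is instantiated (through the Dataproc API or gcloud), not through this resource.

```go
Parameters: dataproc.WorkflowTemplateParameterArray{
	&dataproc.WorkflowTemplateParameterArgs{
		Name:        pulumi.String("ZONE"),
		Description: pulumi.String("Zone used by the cluster selector"),
		// Every path listed here is replaced by the value supplied at instantiation time.
		Fields: pulumi.StringArray{
			pulumi.String("placement.clusterSelector.zone"),
		},
	},
},
```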

type WorkflowTemplateParameterArgs added in v5.2.0

type WorkflowTemplateParameterArgs struct {
	// Optional. Brief description of the parameter. Must not exceed 1024 characters.
	Description pulumi.StringPtrInput `pulumi:"description"`
	// Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask; for example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`.
	Fields pulumi.StringArrayInput `pulumi:"fields"`
	// Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
	Name pulumi.StringInput `pulumi:"name"`
	// Optional. Validation rules to be applied to this parameter's value.
	Validation WorkflowTemplateParameterValidationPtrInput `pulumi:"validation"`
}

func (WorkflowTemplateParameterArgs) ElementType added in v5.2.0

func (WorkflowTemplateParameterArgs) ToWorkflowTemplateParameterOutput added in v5.2.0

func (i WorkflowTemplateParameterArgs) ToWorkflowTemplateParameterOutput() WorkflowTemplateParameterOutput

func (WorkflowTemplateParameterArgs) ToWorkflowTemplateParameterOutputWithContext added in v5.2.0

func (i WorkflowTemplateParameterArgs) ToWorkflowTemplateParameterOutputWithContext(ctx context.Context) WorkflowTemplateParameterOutput

type WorkflowTemplateParameterArray added in v5.2.0

type WorkflowTemplateParameterArray []WorkflowTemplateParameterInput

func (WorkflowTemplateParameterArray) ElementType added in v5.2.0

func (WorkflowTemplateParameterArray) ToWorkflowTemplateParameterArrayOutput added in v5.2.0

func (i WorkflowTemplateParameterArray) ToWorkflowTemplateParameterArrayOutput() WorkflowTemplateParameterArrayOutput

func (WorkflowTemplateParameterArray) ToWorkflowTemplateParameterArrayOutputWithContext added in v5.2.0

func (i WorkflowTemplateParameterArray) ToWorkflowTemplateParameterArrayOutputWithContext(ctx context.Context) WorkflowTemplateParameterArrayOutput

type WorkflowTemplateParameterArrayInput added in v5.2.0

type WorkflowTemplateParameterArrayInput interface {
	pulumi.Input

	ToWorkflowTemplateParameterArrayOutput() WorkflowTemplateParameterArrayOutput
	ToWorkflowTemplateParameterArrayOutputWithContext(context.Context) WorkflowTemplateParameterArrayOutput
}

WorkflowTemplateParameterArrayInput is an input type that accepts WorkflowTemplateParameterArray and WorkflowTemplateParameterArrayOutput values. You can construct a concrete instance of `WorkflowTemplateParameterArrayInput` via:

WorkflowTemplateParameterArray{ WorkflowTemplateParameterArgs{...} }

type WorkflowTemplateParameterArrayOutput added in v5.2.0

type WorkflowTemplateParameterArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateParameterArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplateParameterArrayOutput) Index added in v5.2.0

func (WorkflowTemplateParameterArrayOutput) ToWorkflowTemplateParameterArrayOutput added in v5.2.0

func (o WorkflowTemplateParameterArrayOutput) ToWorkflowTemplateParameterArrayOutput() WorkflowTemplateParameterArrayOutput

func (WorkflowTemplateParameterArrayOutput) ToWorkflowTemplateParameterArrayOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterArrayOutput) ToWorkflowTemplateParameterArrayOutputWithContext(ctx context.Context) WorkflowTemplateParameterArrayOutput

type WorkflowTemplateParameterInput added in v5.2.0

type WorkflowTemplateParameterInput interface {
	pulumi.Input

	ToWorkflowTemplateParameterOutput() WorkflowTemplateParameterOutput
	ToWorkflowTemplateParameterOutputWithContext(context.Context) WorkflowTemplateParameterOutput
}

WorkflowTemplateParameterInput is an input type that accepts WorkflowTemplateParameterArgs and WorkflowTemplateParameterOutput values. You can construct a concrete instance of `WorkflowTemplateParameterInput` via:

WorkflowTemplateParameterArgs{...}

type WorkflowTemplateParameterOutput added in v5.2.0

type WorkflowTemplateParameterOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateParameterOutput) Description added in v5.2.0

Optional. Brief description of the parameter. Must not exceed 1024 characters.

func (WorkflowTemplateParameterOutput) ElementType added in v5.2.0

func (WorkflowTemplateParameterOutput) Fields added in v5.2.0

Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask; for example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`.

func (WorkflowTemplateParameterOutput) Name added in v5.2.0

Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.

func (WorkflowTemplateParameterOutput) ToWorkflowTemplateParameterOutput added in v5.2.0

func (o WorkflowTemplateParameterOutput) ToWorkflowTemplateParameterOutput() WorkflowTemplateParameterOutput

func (WorkflowTemplateParameterOutput) ToWorkflowTemplateParameterOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterOutput) ToWorkflowTemplateParameterOutputWithContext(ctx context.Context) WorkflowTemplateParameterOutput

func (WorkflowTemplateParameterOutput) Validation added in v5.2.0

Optional. Validation rules to be applied to this parameter's value.

type WorkflowTemplateParameterValidation added in v5.2.0

type WorkflowTemplateParameterValidation struct {
	// Validation based on regular expressions.
	Regex *WorkflowTemplateParameterValidationRegex `pulumi:"regex"`
	// Optional. Validation based on a list of allowed values.
	Values *WorkflowTemplateParameterValidationValues `pulumi:"values"`
}
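
As a sketch, the two validation styles are built as in the fragment below (zone values and pattern illustrative); either value can then be assigned to the Validation field of WorkflowTemplateParameterArgs, and a given parameter would use only one of them.

```go
// Restrict the parameter to an explicit set of allowed values.
allowedZones := &dataproc.WorkflowTemplateParameterValidationArgs{
	Values: &dataproc.WorkflowTemplateParameterValidationValuesArgs{
		Values: pulumi.StringArray{
			pulumi.String("us-central1-a"),
			pulumi.String("us-central1-f"),
		},
	},
}

// Or validate the value against RE2 patterns instead.
zonePattern := &dataproc.WorkflowTemplateParameterValidationArgs{
	Regex: &dataproc.WorkflowTemplateParameterValidationRegexArgs{
		Regexes: pulumi.StringArray{pulumi.String("us-central1-[a-f]")},
	},
}
```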

type WorkflowTemplateParameterValidationArgs added in v5.2.0

type WorkflowTemplateParameterValidationArgs struct {
	// Validation based on regular expressions.
	Regex WorkflowTemplateParameterValidationRegexPtrInput `pulumi:"regex"`
	// Optional. Validation based on a list of allowed values.
	Values WorkflowTemplateParameterValidationValuesPtrInput `pulumi:"values"`
}

func (WorkflowTemplateParameterValidationArgs) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationArgs) ToWorkflowTemplateParameterValidationOutput added in v5.2.0

func (i WorkflowTemplateParameterValidationArgs) ToWorkflowTemplateParameterValidationOutput() WorkflowTemplateParameterValidationOutput

func (WorkflowTemplateParameterValidationArgs) ToWorkflowTemplateParameterValidationOutputWithContext added in v5.2.0

func (i WorkflowTemplateParameterValidationArgs) ToWorkflowTemplateParameterValidationOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationOutput

func (WorkflowTemplateParameterValidationArgs) ToWorkflowTemplateParameterValidationPtrOutput added in v5.2.0

func (i WorkflowTemplateParameterValidationArgs) ToWorkflowTemplateParameterValidationPtrOutput() WorkflowTemplateParameterValidationPtrOutput

func (WorkflowTemplateParameterValidationArgs) ToWorkflowTemplateParameterValidationPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateParameterValidationArgs) ToWorkflowTemplateParameterValidationPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationPtrOutput

type WorkflowTemplateParameterValidationInput added in v5.2.0

type WorkflowTemplateParameterValidationInput interface {
	pulumi.Input

	ToWorkflowTemplateParameterValidationOutput() WorkflowTemplateParameterValidationOutput
	ToWorkflowTemplateParameterValidationOutputWithContext(context.Context) WorkflowTemplateParameterValidationOutput
}

WorkflowTemplateParameterValidationInput is an input type that accepts WorkflowTemplateParameterValidationArgs and WorkflowTemplateParameterValidationOutput values. You can construct a concrete instance of `WorkflowTemplateParameterValidationInput` via:

WorkflowTemplateParameterValidationArgs{...}

type WorkflowTemplateParameterValidationOutput added in v5.2.0

type WorkflowTemplateParameterValidationOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateParameterValidationOutput) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationOutput) Regex added in v5.2.0

Validation based on regular expressions.

func (WorkflowTemplateParameterValidationOutput) ToWorkflowTemplateParameterValidationOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationOutput) ToWorkflowTemplateParameterValidationOutput() WorkflowTemplateParameterValidationOutput

func (WorkflowTemplateParameterValidationOutput) ToWorkflowTemplateParameterValidationOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationOutput) ToWorkflowTemplateParameterValidationOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationOutput

func (WorkflowTemplateParameterValidationOutput) ToWorkflowTemplateParameterValidationPtrOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationOutput) ToWorkflowTemplateParameterValidationPtrOutput() WorkflowTemplateParameterValidationPtrOutput

func (WorkflowTemplateParameterValidationOutput) ToWorkflowTemplateParameterValidationPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationOutput) ToWorkflowTemplateParameterValidationPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationPtrOutput

func (WorkflowTemplateParameterValidationOutput) Values added in v5.2.0

Optional. Validation based on a list of allowed values.

type WorkflowTemplateParameterValidationPtrInput added in v5.2.0

type WorkflowTemplateParameterValidationPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateParameterValidationPtrOutput() WorkflowTemplateParameterValidationPtrOutput
	ToWorkflowTemplateParameterValidationPtrOutputWithContext(context.Context) WorkflowTemplateParameterValidationPtrOutput
}

WorkflowTemplateParameterValidationPtrInput is an input type that accepts WorkflowTemplateParameterValidationArgs, WorkflowTemplateParameterValidationPtr and WorkflowTemplateParameterValidationPtrOutput values. You can construct a concrete instance of `WorkflowTemplateParameterValidationPtrInput` via:

        WorkflowTemplateParameterValidationArgs{...}

or:

        nil

type WorkflowTemplateParameterValidationPtrOutput added in v5.2.0

type WorkflowTemplateParameterValidationPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateParameterValidationPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateParameterValidationPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationPtrOutput) Regex added in v5.2.0

Validation based on regular expressions.

func (WorkflowTemplateParameterValidationPtrOutput) ToWorkflowTemplateParameterValidationPtrOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationPtrOutput) ToWorkflowTemplateParameterValidationPtrOutput() WorkflowTemplateParameterValidationPtrOutput

func (WorkflowTemplateParameterValidationPtrOutput) ToWorkflowTemplateParameterValidationPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationPtrOutput) ToWorkflowTemplateParameterValidationPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationPtrOutput

func (WorkflowTemplateParameterValidationPtrOutput) Values added in v5.2.0

Optional. Validation based on a list of allowed values.

type WorkflowTemplateParameterValidationRegex added in v5.2.0

type WorkflowTemplateParameterValidationRegex struct {
	// Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
	Regexes []string `pulumi:"regexes"`
}
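
Because the value must match in its entirety, the pattern behaves as if anchored. A small illustrative fragment for the Regex field of WorkflowTemplateParameterValidationArgs:

```go
Regex: &dataproc.WorkflowTemplateParameterValidationRegexArgs{
	Regexes: pulumi.StringArray{
		// "4g" and "16g" pass; "4gb" and "mem=4g" are rejected, since
		// substring matches are not sufficient.
		pulumi.String("[0-9]+g"),
	},
},
```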

type WorkflowTemplateParameterValidationRegexArgs added in v5.2.0

type WorkflowTemplateParameterValidationRegexArgs struct {
	// Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).
	Regexes pulumi.StringArrayInput `pulumi:"regexes"`
}

func (WorkflowTemplateParameterValidationRegexArgs) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationRegexArgs) ToWorkflowTemplateParameterValidationRegexOutput added in v5.2.0

func (i WorkflowTemplateParameterValidationRegexArgs) ToWorkflowTemplateParameterValidationRegexOutput() WorkflowTemplateParameterValidationRegexOutput

func (WorkflowTemplateParameterValidationRegexArgs) ToWorkflowTemplateParameterValidationRegexOutputWithContext added in v5.2.0

func (i WorkflowTemplateParameterValidationRegexArgs) ToWorkflowTemplateParameterValidationRegexOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationRegexOutput

func (WorkflowTemplateParameterValidationRegexArgs) ToWorkflowTemplateParameterValidationRegexPtrOutput added in v5.2.0

func (i WorkflowTemplateParameterValidationRegexArgs) ToWorkflowTemplateParameterValidationRegexPtrOutput() WorkflowTemplateParameterValidationRegexPtrOutput

func (WorkflowTemplateParameterValidationRegexArgs) ToWorkflowTemplateParameterValidationRegexPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateParameterValidationRegexArgs) ToWorkflowTemplateParameterValidationRegexPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationRegexPtrOutput

type WorkflowTemplateParameterValidationRegexInput added in v5.2.0

type WorkflowTemplateParameterValidationRegexInput interface {
	pulumi.Input

	ToWorkflowTemplateParameterValidationRegexOutput() WorkflowTemplateParameterValidationRegexOutput
	ToWorkflowTemplateParameterValidationRegexOutputWithContext(context.Context) WorkflowTemplateParameterValidationRegexOutput
}

WorkflowTemplateParameterValidationRegexInput is an input type that accepts WorkflowTemplateParameterValidationRegexArgs and WorkflowTemplateParameterValidationRegexOutput values. You can construct a concrete instance of `WorkflowTemplateParameterValidationRegexInput` via:

WorkflowTemplateParameterValidationRegexArgs{...}

type WorkflowTemplateParameterValidationRegexOutput added in v5.2.0

type WorkflowTemplateParameterValidationRegexOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateParameterValidationRegexOutput) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationRegexOutput) Regexes added in v5.2.0

Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).

func (WorkflowTemplateParameterValidationRegexOutput) ToWorkflowTemplateParameterValidationRegexOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationRegexOutput) ToWorkflowTemplateParameterValidationRegexOutput() WorkflowTemplateParameterValidationRegexOutput

func (WorkflowTemplateParameterValidationRegexOutput) ToWorkflowTemplateParameterValidationRegexOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationRegexOutput) ToWorkflowTemplateParameterValidationRegexOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationRegexOutput

func (WorkflowTemplateParameterValidationRegexOutput) ToWorkflowTemplateParameterValidationRegexPtrOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationRegexOutput) ToWorkflowTemplateParameterValidationRegexPtrOutput() WorkflowTemplateParameterValidationRegexPtrOutput

func (WorkflowTemplateParameterValidationRegexOutput) ToWorkflowTemplateParameterValidationRegexPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationRegexOutput) ToWorkflowTemplateParameterValidationRegexPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationRegexPtrOutput

type WorkflowTemplateParameterValidationRegexPtrInput added in v5.2.0

type WorkflowTemplateParameterValidationRegexPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateParameterValidationRegexPtrOutput() WorkflowTemplateParameterValidationRegexPtrOutput
	ToWorkflowTemplateParameterValidationRegexPtrOutputWithContext(context.Context) WorkflowTemplateParameterValidationRegexPtrOutput
}

WorkflowTemplateParameterValidationRegexPtrInput is an input type that accepts WorkflowTemplateParameterValidationRegexArgs, WorkflowTemplateParameterValidationRegexPtr and WorkflowTemplateParameterValidationRegexPtrOutput values. You can construct a concrete instance of `WorkflowTemplateParameterValidationRegexPtrInput` via:

        WorkflowTemplateParameterValidationRegexArgs{...}

or:

        nil

type WorkflowTemplateParameterValidationRegexPtrOutput added in v5.2.0

type WorkflowTemplateParameterValidationRegexPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateParameterValidationRegexPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateParameterValidationRegexPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationRegexPtrOutput) Regexes added in v5.2.0

Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).

func (WorkflowTemplateParameterValidationRegexPtrOutput) ToWorkflowTemplateParameterValidationRegexPtrOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationRegexPtrOutput) ToWorkflowTemplateParameterValidationRegexPtrOutput() WorkflowTemplateParameterValidationRegexPtrOutput

func (WorkflowTemplateParameterValidationRegexPtrOutput) ToWorkflowTemplateParameterValidationRegexPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationRegexPtrOutput) ToWorkflowTemplateParameterValidationRegexPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationRegexPtrOutput

type WorkflowTemplateParameterValidationValues added in v5.2.0

type WorkflowTemplateParameterValidationValues struct {
	// Optional. List of allowed values for the parameter.
	Values []string `pulumi:"values"`
}

type WorkflowTemplateParameterValidationValuesArgs added in v5.2.0

type WorkflowTemplateParameterValidationValuesArgs struct {
	// Optional. List of allowed values for the parameter.
	Values pulumi.StringArrayInput `pulumi:"values"`
}

func (WorkflowTemplateParameterValidationValuesArgs) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationValuesArgs) ToWorkflowTemplateParameterValidationValuesOutput added in v5.2.0

func (i WorkflowTemplateParameterValidationValuesArgs) ToWorkflowTemplateParameterValidationValuesOutput() WorkflowTemplateParameterValidationValuesOutput

func (WorkflowTemplateParameterValidationValuesArgs) ToWorkflowTemplateParameterValidationValuesOutputWithContext added in v5.2.0

func (i WorkflowTemplateParameterValidationValuesArgs) ToWorkflowTemplateParameterValidationValuesOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationValuesOutput

func (WorkflowTemplateParameterValidationValuesArgs) ToWorkflowTemplateParameterValidationValuesPtrOutput added in v5.2.0

func (i WorkflowTemplateParameterValidationValuesArgs) ToWorkflowTemplateParameterValidationValuesPtrOutput() WorkflowTemplateParameterValidationValuesPtrOutput

func (WorkflowTemplateParameterValidationValuesArgs) ToWorkflowTemplateParameterValidationValuesPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplateParameterValidationValuesArgs) ToWorkflowTemplateParameterValidationValuesPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationValuesPtrOutput

type WorkflowTemplateParameterValidationValuesInput added in v5.2.0

type WorkflowTemplateParameterValidationValuesInput interface {
	pulumi.Input

	ToWorkflowTemplateParameterValidationValuesOutput() WorkflowTemplateParameterValidationValuesOutput
	ToWorkflowTemplateParameterValidationValuesOutputWithContext(context.Context) WorkflowTemplateParameterValidationValuesOutput
}

WorkflowTemplateParameterValidationValuesInput is an input type that accepts WorkflowTemplateParameterValidationValuesArgs and WorkflowTemplateParameterValidationValuesOutput values. You can construct a concrete instance of `WorkflowTemplateParameterValidationValuesInput` via:

WorkflowTemplateParameterValidationValuesArgs{...}

type WorkflowTemplateParameterValidationValuesOutput added in v5.2.0

type WorkflowTemplateParameterValidationValuesOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateParameterValidationValuesOutput) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationValuesOutput) ToWorkflowTemplateParameterValidationValuesOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationValuesOutput) ToWorkflowTemplateParameterValidationValuesOutput() WorkflowTemplateParameterValidationValuesOutput

func (WorkflowTemplateParameterValidationValuesOutput) ToWorkflowTemplateParameterValidationValuesOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationValuesOutput) ToWorkflowTemplateParameterValidationValuesOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationValuesOutput

func (WorkflowTemplateParameterValidationValuesOutput) ToWorkflowTemplateParameterValidationValuesPtrOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationValuesOutput) ToWorkflowTemplateParameterValidationValuesPtrOutput() WorkflowTemplateParameterValidationValuesPtrOutput

func (WorkflowTemplateParameterValidationValuesOutput) ToWorkflowTemplateParameterValidationValuesPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationValuesOutput) ToWorkflowTemplateParameterValidationValuesPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationValuesPtrOutput

func (WorkflowTemplateParameterValidationValuesOutput) Values added in v5.2.0

Optional. List of allowed values for the parameter.

type WorkflowTemplateParameterValidationValuesPtrInput added in v5.2.0

type WorkflowTemplateParameterValidationValuesPtrInput interface {
	pulumi.Input

	ToWorkflowTemplateParameterValidationValuesPtrOutput() WorkflowTemplateParameterValidationValuesPtrOutput
	ToWorkflowTemplateParameterValidationValuesPtrOutputWithContext(context.Context) WorkflowTemplateParameterValidationValuesPtrOutput
}

WorkflowTemplateParameterValidationValuesPtrInput is an input type that accepts WorkflowTemplateParameterValidationValuesArgs, WorkflowTemplateParameterValidationValuesPtr and WorkflowTemplateParameterValidationValuesPtrOutput values. You can construct a concrete instance of `WorkflowTemplateParameterValidationValuesPtrInput` via:

        WorkflowTemplateParameterValidationValuesArgs{...}

or:

        nil

type WorkflowTemplateParameterValidationValuesPtrOutput added in v5.2.0

type WorkflowTemplateParameterValidationValuesPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplateParameterValidationValuesPtrOutput) Elem added in v5.2.0

func (WorkflowTemplateParameterValidationValuesPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplateParameterValidationValuesPtrOutput) ToWorkflowTemplateParameterValidationValuesPtrOutput added in v5.2.0

func (o WorkflowTemplateParameterValidationValuesPtrOutput) ToWorkflowTemplateParameterValidationValuesPtrOutput() WorkflowTemplateParameterValidationValuesPtrOutput

func (WorkflowTemplateParameterValidationValuesPtrOutput) ToWorkflowTemplateParameterValidationValuesPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplateParameterValidationValuesPtrOutput) ToWorkflowTemplateParameterValidationValuesPtrOutputWithContext(ctx context.Context) WorkflowTemplateParameterValidationValuesPtrOutput

func (WorkflowTemplateParameterValidationValuesPtrOutput) Values added in v5.2.0

Optional. List of allowed values for the parameter.

type WorkflowTemplatePlacement added in v5.2.0

type WorkflowTemplatePlacement struct {
	// Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
	ClusterSelector *WorkflowTemplatePlacementClusterSelector `pulumi:"clusterSelector"`
	// A cluster that is managed by the workflow.
	ManagedCluster *WorkflowTemplatePlacementManagedCluster `pulumi:"managedCluster"`
}
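
In practice exactly one of the two blocks is set. The fragment below (labels and names illustrative) sketches both shapes for the Placement field of WorkflowTemplateArgs.

```go
// Run each job on an existing cluster chosen by label...
Placement: &dataproc.WorkflowTemplatePlacementArgs{
	ClusterSelector: &dataproc.WorkflowTemplatePlacementClusterSelectorArgs{
		ClusterLabels: pulumi.StringMap{"env": pulumi.String("prod")},
	},
},

// ...or let the workflow create a cluster for the run and delete it when the workflow finishes.
Placement: &dataproc.WorkflowTemplatePlacementArgs{
	ManagedCluster: &dataproc.WorkflowTemplatePlacementManagedClusterArgs{
		ClusterName: pulumi.String("wf-ephemeral"),
		// The cluster shape goes here; see WorkflowTemplatePlacementManagedClusterConfig.
		Config: &dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{},
	},
},
```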

type WorkflowTemplatePlacementArgs added in v5.2.0

type WorkflowTemplatePlacementArgs struct {
	// Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.
	ClusterSelector WorkflowTemplatePlacementClusterSelectorPtrInput `pulumi:"clusterSelector"`
	// A cluster that is managed by the workflow.
	ManagedCluster WorkflowTemplatePlacementManagedClusterPtrInput `pulumi:"managedCluster"`
}

func (WorkflowTemplatePlacementArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementOutput added in v5.2.0

func (i WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementOutput() WorkflowTemplatePlacementOutput

func (WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementOutputWithContext(ctx context.Context) WorkflowTemplatePlacementOutput

func (WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementPtrOutput added in v5.2.0

func (i WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementPtrOutput() WorkflowTemplatePlacementPtrOutput

func (WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementArgs) ToWorkflowTemplatePlacementPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementPtrOutput

type WorkflowTemplatePlacementClusterSelector added in v5.2.0

type WorkflowTemplatePlacementClusterSelector struct {
	// Required. The cluster labels. Cluster must have all labels to match.
	ClusterLabels map[string]string `pulumi:"clusterLabels"`
	// Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f`
	Zone *string `pulumi:"zone"`
}
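
An illustrative fragment for the ClusterSelector field: a cluster qualifies only if it carries every label listed, and the zone is optional outside the global region.

```go
ClusterSelector: &dataproc.WorkflowTemplatePlacementClusterSelectorArgs{
	// The target cluster must carry all of these labels.
	ClusterLabels: pulumi.StringMap{
		"env":  pulumi.String("prod"),
		"team": pulumi.String("analytics"),
	},
	// Optional here; if omitted, the service picks a zone in the region.
	Zone: pulumi.String("us-central1-f"),
},
```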

type WorkflowTemplatePlacementClusterSelectorArgs added in v5.2.0

type WorkflowTemplatePlacementClusterSelectorArgs struct {
	// Required. The cluster labels. Cluster must have all labels to match.
	ClusterLabels pulumi.StringMapInput `pulumi:"clusterLabels"`
	// Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f`
	Zone pulumi.StringPtrInput `pulumi:"zone"`
}

func (WorkflowTemplatePlacementClusterSelectorArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementClusterSelectorArgs) ToWorkflowTemplatePlacementClusterSelectorOutput added in v5.2.0

func (i WorkflowTemplatePlacementClusterSelectorArgs) ToWorkflowTemplatePlacementClusterSelectorOutput() WorkflowTemplatePlacementClusterSelectorOutput

func (WorkflowTemplatePlacementClusterSelectorArgs) ToWorkflowTemplatePlacementClusterSelectorOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementClusterSelectorArgs) ToWorkflowTemplatePlacementClusterSelectorOutputWithContext(ctx context.Context) WorkflowTemplatePlacementClusterSelectorOutput

func (WorkflowTemplatePlacementClusterSelectorArgs) ToWorkflowTemplatePlacementClusterSelectorPtrOutput added in v5.2.0

func (i WorkflowTemplatePlacementClusterSelectorArgs) ToWorkflowTemplatePlacementClusterSelectorPtrOutput() WorkflowTemplatePlacementClusterSelectorPtrOutput

func (WorkflowTemplatePlacementClusterSelectorArgs) ToWorkflowTemplatePlacementClusterSelectorPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementClusterSelectorArgs) ToWorkflowTemplatePlacementClusterSelectorPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementClusterSelectorPtrOutput

type WorkflowTemplatePlacementClusterSelectorInput added in v5.2.0

type WorkflowTemplatePlacementClusterSelectorInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementClusterSelectorOutput() WorkflowTemplatePlacementClusterSelectorOutput
	ToWorkflowTemplatePlacementClusterSelectorOutputWithContext(context.Context) WorkflowTemplatePlacementClusterSelectorOutput
}

WorkflowTemplatePlacementClusterSelectorInput is an input type that accepts WorkflowTemplatePlacementClusterSelectorArgs and WorkflowTemplatePlacementClusterSelectorOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementClusterSelectorInput` via:

WorkflowTemplatePlacementClusterSelectorArgs{...}

type WorkflowTemplatePlacementClusterSelectorOutput added in v5.2.0

type WorkflowTemplatePlacementClusterSelectorOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementClusterSelectorOutput) ClusterLabels added in v5.2.0

Required. The cluster labels. Cluster must have all labels to match.

func (WorkflowTemplatePlacementClusterSelectorOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementClusterSelectorOutput) ToWorkflowTemplatePlacementClusterSelectorOutput added in v5.2.0

func (o WorkflowTemplatePlacementClusterSelectorOutput) ToWorkflowTemplatePlacementClusterSelectorOutput() WorkflowTemplatePlacementClusterSelectorOutput

func (WorkflowTemplatePlacementClusterSelectorOutput) ToWorkflowTemplatePlacementClusterSelectorOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementClusterSelectorOutput) ToWorkflowTemplatePlacementClusterSelectorOutputWithContext(ctx context.Context) WorkflowTemplatePlacementClusterSelectorOutput

func (WorkflowTemplatePlacementClusterSelectorOutput) ToWorkflowTemplatePlacementClusterSelectorPtrOutput added in v5.2.0

func (o WorkflowTemplatePlacementClusterSelectorOutput) ToWorkflowTemplatePlacementClusterSelectorPtrOutput() WorkflowTemplatePlacementClusterSelectorPtrOutput

func (WorkflowTemplatePlacementClusterSelectorOutput) ToWorkflowTemplatePlacementClusterSelectorPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementClusterSelectorOutput) ToWorkflowTemplatePlacementClusterSelectorPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementClusterSelectorPtrOutput

func (WorkflowTemplatePlacementClusterSelectorOutput) Zone added in v5.2.0

Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f`

type WorkflowTemplatePlacementClusterSelectorPtrInput added in v5.2.0

type WorkflowTemplatePlacementClusterSelectorPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementClusterSelectorPtrOutput() WorkflowTemplatePlacementClusterSelectorPtrOutput
	ToWorkflowTemplatePlacementClusterSelectorPtrOutputWithContext(context.Context) WorkflowTemplatePlacementClusterSelectorPtrOutput
}

WorkflowTemplatePlacementClusterSelectorPtrInput is an input type that accepts WorkflowTemplatePlacementClusterSelectorArgs, WorkflowTemplatePlacementClusterSelectorPtr and WorkflowTemplatePlacementClusterSelectorPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementClusterSelectorPtrInput` via:

        WorkflowTemplatePlacementClusterSelectorArgs{...}

or:

        nil

type WorkflowTemplatePlacementClusterSelectorPtrOutput added in v5.2.0

type WorkflowTemplatePlacementClusterSelectorPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementClusterSelectorPtrOutput) ClusterLabels added in v5.2.0

Required. The cluster labels. Cluster must have all labels to match.

func (WorkflowTemplatePlacementClusterSelectorPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementClusterSelectorPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementClusterSelectorPtrOutput) ToWorkflowTemplatePlacementClusterSelectorPtrOutput added in v5.2.0

func (o WorkflowTemplatePlacementClusterSelectorPtrOutput) ToWorkflowTemplatePlacementClusterSelectorPtrOutput() WorkflowTemplatePlacementClusterSelectorPtrOutput

func (WorkflowTemplatePlacementClusterSelectorPtrOutput) ToWorkflowTemplatePlacementClusterSelectorPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementClusterSelectorPtrOutput) ToWorkflowTemplatePlacementClusterSelectorPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementClusterSelectorPtrOutput

func (WorkflowTemplatePlacementClusterSelectorPtrOutput) Zone added in v5.2.0

Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f`

type WorkflowTemplatePlacementInput added in v5.2.0

type WorkflowTemplatePlacementInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementOutput() WorkflowTemplatePlacementOutput
	ToWorkflowTemplatePlacementOutputWithContext(context.Context) WorkflowTemplatePlacementOutput
}

WorkflowTemplatePlacementInput is an input type that accepts WorkflowTemplatePlacementArgs and WorkflowTemplatePlacementOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementInput` via:

WorkflowTemplatePlacementArgs{...}

type WorkflowTemplatePlacementManagedCluster added in v5.2.0

type WorkflowTemplatePlacementManagedCluster struct {
	// Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
	ClusterName string `pulumi:"clusterName"`
	// Required. The cluster configuration.
	Config WorkflowTemplatePlacementManagedClusterConfig `pulumi:"config"`
	// Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: `{0,63}`. No more than 32 labels can be associated with a given cluster.
	Labels map[string]string `pulumi:"labels"`
}

type WorkflowTemplatePlacementManagedClusterArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterArgs struct {
	// Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.
	ClusterName pulumi.StringInput `pulumi:"clusterName"`
	// Required. The cluster configuration.
	Config WorkflowTemplatePlacementManagedClusterConfigInput `pulumi:"config"`
	// Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: `{0,63}`. No more than 32 labels can be associated with a given cluster.
	Labels pulumi.StringMapInput `pulumi:"labels"`
}
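
As a hedged illustration (the cluster name prefix, label, and zone are placeholders), a managed-cluster placement might be assembled like this, nesting the config types documented below.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	_ = dataproc.WorkflowTemplatePlacementManagedClusterArgs{
		// Name prefix; Dataproc appends a random suffix to form the cluster name.
		ClusterName: pulumi.String("wt-cluster"),
		Config: dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{
			GceClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
				Zone: pulumi.String("us-central1-f"),
			},
		},
		Labels: pulumi.StringMap{"team": pulumi.String("data")},
	}
}
```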

func (WorkflowTemplatePlacementManagedClusterArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterArgs) ToWorkflowTemplatePlacementManagedClusterOutput added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterArgs) ToWorkflowTemplatePlacementManagedClusterOutput() WorkflowTemplatePlacementManagedClusterOutput

func (WorkflowTemplatePlacementManagedClusterArgs) ToWorkflowTemplatePlacementManagedClusterOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterArgs) ToWorkflowTemplatePlacementManagedClusterOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterOutput

func (WorkflowTemplatePlacementManagedClusterArgs) ToWorkflowTemplatePlacementManagedClusterPtrOutput added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterArgs) ToWorkflowTemplatePlacementManagedClusterPtrOutput() WorkflowTemplatePlacementManagedClusterPtrOutput

func (WorkflowTemplatePlacementManagedClusterArgs) ToWorkflowTemplatePlacementManagedClusterPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterArgs) ToWorkflowTemplatePlacementManagedClusterPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterPtrOutput

type WorkflowTemplatePlacementManagedClusterConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfig struct {
	// Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
	AutoscalingConfig *WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig `pulumi:"autoscalingConfig"`
	// Optional. Encryption settings for the cluster.
	EncryptionConfig *WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig `pulumi:"encryptionConfig"`
	// Optional. Port/endpoint configuration for this cluster
	EndpointConfig *WorkflowTemplatePlacementManagedClusterConfigEndpointConfig `pulumi:"endpointConfig"`
	// Optional. The shared Compute Engine config settings for all instances in a cluster.
	GceClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig `pulumi:"gceClusterConfig"`
	// Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gceClusterConfig`, `masterConfig`, `workerConfig`, `secondaryWorkerConfig`, and `autoscalingConfig`.
	GkeClusterConfig *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig `pulumi:"gkeClusterConfig"`
	// Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
	InitializationActions []WorkflowTemplatePlacementManagedClusterConfigInitializationAction `pulumi:"initializationActions"`
	// Optional. Lifecycle setting for the cluster.
	LifecycleConfig *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig `pulumi:"lifecycleConfig"`
	// Optional. The Compute Engine config settings for additional worker instances in a cluster.
	MasterConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfig `pulumi:"masterConfig"`
	// Optional. Metastore configuration.
	MetastoreConfig *WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig `pulumi:"metastoreConfig"`
	// Optional. The Compute Engine config settings for additional worker instances in a cluster.
	SecondaryWorkerConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig `pulumi:"secondaryWorkerConfig"`
	// Optional. Security settings for the cluster.
	SecurityConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig `pulumi:"securityConfig"`
	// Optional. The config settings for software inside the cluster.
	SoftwareConfig *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig `pulumi:"softwareConfig"`
	// Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
	StagingBucket *string `pulumi:"stagingBucket"`
	// Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
	TempBucket *string `pulumi:"tempBucket"`
	// Optional. The Compute Engine config settings for worker instances in a cluster.
	WorkerConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfig `pulumi:"workerConfig"`
}

type WorkflowTemplatePlacementManagedClusterConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigArgs struct {
	// Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
	AutoscalingConfig WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrInput `pulumi:"autoscalingConfig"`
	// Optional. Encryption settings for the cluster.
	EncryptionConfig WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrInput `pulumi:"encryptionConfig"`
	// Optional. Port/endpoint configuration for this cluster
	EndpointConfig WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrInput `pulumi:"endpointConfig"`
	// Optional. The shared Compute Engine config settings for all instances in a cluster.
	GceClusterConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrInput `pulumi:"gceClusterConfig"`
	// Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gceClusterConfig`, `masterConfig`, `workerConfig`, `secondaryWorkerConfig`, and `autoscalingConfig`.
	GkeClusterConfig WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrInput `pulumi:"gkeClusterConfig"`
	// Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
	InitializationActions WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayInput `pulumi:"initializationActions"`
	// Optional. Lifecycle setting for the cluster.
	LifecycleConfig WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrInput `pulumi:"lifecycleConfig"`
	// Optional. The Compute Engine config settings for additional worker instances in a cluster.
	MasterConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrInput `pulumi:"masterConfig"`
	// Optional. Metastore configuration.
	MetastoreConfig WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrInput `pulumi:"metastoreConfig"`
	// Optional. The Compute Engine config settings for additional worker instances in a cluster.
	SecondaryWorkerConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrInput `pulumi:"secondaryWorkerConfig"`
	// Optional. Security settings for the cluster.
	SecurityConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrInput `pulumi:"securityConfig"`
	// Optional. The config settings for software inside the cluster.
	SoftwareConfig WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrInput `pulumi:"softwareConfig"`
	// Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
	StagingBucket pulumi.StringPtrInput `pulumi:"stagingBucket"`
	// Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
	TempBucket pulumi.StringPtrInput `pulumi:"tempBucket"`
	// Optional. The Compute Engine config settings for additional worker instances in a cluster.
	WorkerConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrInput `pulumi:"workerConfig"`
}
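
A sketch of a partially populated config, assuming placeholder bucket and zone names; only a few of the optional fields are shown.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	_ = dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{
		GceClusterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
			Zone: pulumi.String("us-central1-f"),
		},
		EndpointConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs{
			EnableHttpPortAccess: pulumi.Bool(true),
		},
		// Hypothetical bucket; omit to let Dataproc create a managed staging bucket.
		StagingBucket: pulumi.String("my-dataproc-staging-bucket"),
	}
}
```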

func (WorkflowTemplatePlacementManagedClusterConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigOutput added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigOutput() WorkflowTemplatePlacementManagedClusterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutput added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigPtrOutput

func (WorkflowTemplatePlacementManagedClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfig struct {
	// Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` Note that the policy must be in the same project and Dataproc region.
	Policy *string `pulumi:"policy"`
}

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs struct {
	// Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` Note that the policy must be in the same project and Dataproc region.
	Policy pulumi.StringPtrInput `pulumi:"policy"`
}
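
For illustration only: the policy is referenced by a resource name that includes the project and region; the name below is a placeholder.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	_ = dataproc.WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs{
		// Must be in the same project and Dataproc region as the template.
		Policy: pulumi.String("projects/my-project/locations/us-central1/autoscalingPolicies/my-policy"),
	}
}
```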

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput() WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs and WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput) Policy added in v5.2.0

Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` Note that the policy must be in the same project and Dataproc region.

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs, WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtr and WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput) Policy added in v5.2.0

Optional. The autoscaling policy used by the cluster. Only resource names that include the project id and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` Note that the policy must be in the same project and Dataproc region.

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfig struct {
	// Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
	GcePdKmsKeyName *string `pulumi:"gcePdKmsKeyName"`
}

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs struct {
	// Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
	GcePdKmsKeyName pulumi.StringPtrInput `pulumi:"gcePdKmsKeyName"`
}
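
A hedged example with a placeholder Cloud KMS key name.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	_ = dataproc.WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs{
		// Placeholder key; used for PD disk encryption on all cluster instances.
		GcePdKmsKeyName: pulumi.String("projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"),
	}
}
```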

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput() WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs and WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput) GcePdKmsKeyName added in v5.2.0

Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs, WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtr and WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput) GcePdKmsKeyName added in v5.2.0

Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfig struct {
	// Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
	EnableHttpPortAccess *bool `pulumi:"enableHttpPortAccess"`
	// -
	// Output only. The map of port descriptions to URLs. Will only be populated if enableHttpPortAccess is true.
	HttpPorts map[string]string `pulumi:"httpPorts"`
}

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs struct {
	// Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
	EnableHttpPortAccess pulumi.BoolPtrInput `pulumi:"enableHttpPortAccess"`
	// -
	// Output only. The map of port descriptions to URLs. Will only be populated if enableHttpPortAccess is true.
	HttpPorts pulumi.StringMapInput `pulumi:"httpPorts"`
}
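
A minimal sketch; note that `HttpPorts` is output only, so only `EnableHttpPortAccess` is set on input.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	_ = dataproc.WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs{
		// Expose component web UIs; the resulting URLs appear in HttpPorts.
		EnableHttpPortAccess: pulumi.Bool(true),
	}
}
```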

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput() WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigEndpointConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs and WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigEndpointConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) EnableHttpPortAccess added in v5.2.0

Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) HttpPorts added in v5.2.0

- Output only. The map of port descriptions to URLs. Will only be populated if enableHttpPortAccess is true.

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigEndpointConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs, WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtr and WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput) EnableHttpPortAccess added in v5.2.0

Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput) HttpPorts added in v5.2.0

- Output only. The map of port descriptions to URLs. Will only be populated if enableHttpPortAccess is true.

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig struct {
	// Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internalIpOnly` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
	InternalIpOnly *bool `pulumi:"internalIpOnly"`
	// The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `pulumi:"metadata"`
	// Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `networkUri` nor `subnetworkUri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network". Examples: * `/regions/global/default` * `default`
	Network *string `pulumi:"network"`
	// Optional. Node Group Affinity for sole-tenant clusters.
	NodeGroupAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity `pulumi:"nodeGroupAffinity"`
	// Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
	PrivateIpv6GoogleAccess *string `pulumi:"privateIpv6GoogleAccess"`
	// Optional. Reservation Affinity for consuming Zonal reservation.
	ReservationAffinity *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity `pulumi:"reservationAffinity"`
	// Optional. The service account used by the cluster VM instances. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
	ServiceAccount *string `pulumi:"serviceAccount"`
	// Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes []string `pulumi:"serviceAccountScopes"`
	// Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0`
	Subnetwork *string `pulumi:"subnetwork"`
	// The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
	Tags []string `pulumi:"tags"`
	// Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f`
	Zone *string `pulumi:"zone"`
}

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs struct {
	// Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internalIpOnly` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
	InternalIpOnly pulumi.BoolPtrInput `pulumi:"internalIpOnly"`
	// The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata pulumi.StringMapInput `pulumi:"metadata"`
	// Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `networkUri` nor `subnetworkUri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network". Examples: * `/regions/global/default` * `default`
	Network pulumi.StringPtrInput `pulumi:"network"`
	// Optional. Node Group Affinity for sole-tenant clusters.
	NodeGroupAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrInput `pulumi:"nodeGroupAffinity"`
	// Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL
	PrivateIpv6GoogleAccess pulumi.StringPtrInput `pulumi:"privateIpv6GoogleAccess"`
	// Optional. Reservation Affinity for consuming Zonal reservation.
	ReservationAffinity WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrInput `pulumi:"reservationAffinity"`
	// Optional. The service account used by the cluster VM instances. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
	ServiceAccount pulumi.StringPtrInput `pulumi:"serviceAccount"`
	// Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes pulumi.StringArrayInput `pulumi:"serviceAccountScopes"`
	// Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0`
	Subnetwork pulumi.StringPtrInput `pulumi:"subnetwork"`
	// The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
	Tags pulumi.StringArrayInput `pulumi:"tags"`
	// Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f`
	Zone pulumi.StringPtrInput `pulumi:"zone"`
}
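
A sketch with placeholder network values (the subnetwork, tag, and zone below are hypothetical).

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	_ = dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
		InternalIpOnly: pulumi.Bool(true),
		// Short subnetwork name; a partial URI or full URL is also accepted.
		Subnetwork: pulumi.String("sub0"),
		Tags:       pulumi.StringArray{pulumi.String("dataproc")},
		Zone:       pulumi.String("us-central1-f"),
	}
}
```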

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput() WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs and WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity struct {
	// Required. The URI of a sole-tenant node group that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`
	NodeGroup string `pulumi:"nodeGroup"`
}

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs struct {
	// Required. The URI of a sole-tenant node group that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`
	NodeGroup pulumi.StringInput `pulumi:"nodeGroup"`
}
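
A hedged example; the node group name is a placeholder, and a full or partial URI would work equally well.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	_ = dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs{
		// Sole-tenant node group the cluster should be scheduled onto.
		NodeGroup: pulumi.String("node-group-1"),
	}
}
```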

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput() WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput
}

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs and WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityInput` via:

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput) NodeGroup added in v5.2.0

Required. The URI of a sole-tenant node group that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput() WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtr and WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput) NodeGroup added in v5.2.0

Required. The URI of a sole-tenant node group that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) InternalIpOnly added in v5.2.0

Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internalIpOnly` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Metadata added in v5.2.0

The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Network added in v5.2.0

Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `networkUri` nor `subnetworkUri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network". Examples: * `/regions/global/default` * `default`

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) NodeGroupAffinity added in v5.2.0

Optional. Node Group Affinity for sole-tenant clusters.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) PrivateIpv6GoogleAccess added in v5.2.0

Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ReservationAffinity added in v5.2.0

Optional. Reservation Affinity for consuming Zonal reservation.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ServiceAccount added in v5.2.0

Optional. The service account used by the cluster VM instances. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ServiceAccountScopes added in v5.2.0

Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Subnetwork added in v5.2.0

Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0`

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Tags added in v5.2.0

The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Zone added in v5.2.0

Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f`
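The sketch below shows one way these GCE cluster settings might be populated through the corresponding `Args` type; it is a minimal sketch that assumes the `Args` fields mirror the properties documented above, and the zone, tag, and scope values are placeholders rather than defaults.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// gceClusterConfig sketches a GCE cluster config for a workflow template's
// managed cluster; every literal value below is an illustrative placeholder.
func gceClusterConfig() dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrInput {
	return dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
		Zone:           pulumi.String("us-central1-f"),
		InternalIpOnly: pulumi.Bool(true),
		Tags:           pulumi.StringArray{pulumi.String("dataproc")},
		ServiceAccountScopes: pulumi.StringArray{
			pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
		},
	}
}

func main() { _ = gceClusterConfig() }
```

Returning the `Args` value through the `PtrInput` interface relies on the acceptance rule noted for WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrInput below.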

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtr and WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{...}

or:

        nil
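As a brief illustration of the two accepted forms: an `Args` value satisfies this interface directly, and `nil` can be passed where the whole block should be omitted. The helper function below is hypothetical and stands in for any generated field of this interface type; the zone is a placeholder.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// acceptGceClusterConfig is a hypothetical helper standing in for any
// generated field typed as the PtrInput interface.
func acceptGceClusterConfig(in dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrInput) {
	_ = in
}

func main() {
	// An Args value satisfies the PtrInput interface directly.
	acceptGceClusterConfig(dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs{
		Zone: pulumi.String("us-central1-f"), // placeholder zone
	})
	// nil is accepted where the whole block should be omitted.
	acceptGceClusterConfig(nil)
}
```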

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) InternalIpOnly added in v5.2.0

Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internalIpOnly` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) Metadata added in v5.2.0

The Compute Engine metadata entries to add to all instances (see https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata).

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) Network added in v5.2.0

Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `networkUri` nor `subnetworkUri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see the Compute Engine subnetworks documentation). A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/global/default` * `default`

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) NodeGroupAffinity added in v5.2.0

Optional. Node Group Affinity for sole-tenant clusters.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) PrivateIpv6GoogleAccess added in v5.2.0

Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) ReservationAffinity added in v5.2.0

Optional. Reservation Affinity for consuming Zonal reservation.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) ServiceAccount added in v5.2.0

Optional. The service account used by Dataproc cluster VM instances to access Google Cloud Platform services. If none is specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) ServiceAccountScopes added in v5.2.0

Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) Subnetwork added in v5.2.0

Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0`

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) Tags added in v5.2.0

The Compute Engine tags to add to all instances (see https://cloud.google.com/compute/docs/label-or-tag-resources#tags).

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) Zone added in v5.2.0

Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f`

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity struct {
	// Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
	ConsumeReservationType *string `pulumi:"consumeReservationType"`
	// Optional. Corresponds to the label key of reservation resource.
	Key *string `pulumi:"key"`
	// Optional. Corresponds to the label values of reservation resource.
	Values []string `pulumi:"values"`
}

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs struct {
	// Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
	ConsumeReservationType pulumi.StringPtrInput `pulumi:"consumeReservationType"`
	// Optional. Corresponds to the label key of reservation resource.
	Key pulumi.StringPtrInput `pulumi:"key"`
	// Optional. Corresponds to the label values of reservation resource.
	Values pulumi.StringArrayInput `pulumi:"values"`
}
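As a hedged sketch of these fields in use, the snippet below pins cluster VMs to one named zonal reservation; the label key and value are illustrative placeholders, and `SPECIFIC_RESERVATION` is taken from the enum listed above.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// reservationAffinity pins cluster VMs to a specific zonal reservation.
// The label key and value are illustrative placeholders.
func reservationAffinity() dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrInput {
	return dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs{
		ConsumeReservationType: pulumi.String("SPECIFIC_RESERVATION"),
		Key:                    pulumi.String("compute.googleapis.com/reservation-name"),
		Values:                 pulumi.StringArray{pulumi.String("my-reservation")},
	}
}

func main() { _ = reservationAffinity() }
```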

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput() WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput
}

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs and WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityInput` via:

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput) ConsumeReservationType added in v5.2.0

Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput) Key added in v5.2.0

Optional. Corresponds to the label key of reservation resource.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityOutput) Values added in v5.2.0

Optional. Corresponds to the label values of reservation resource.

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput() WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs, WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtr and WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput) ConsumeReservationType added in v5.2.0

Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput) Key added in v5.2.0

Optional. Corresponds to the label key of reservation resource.

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityPtrOutput) Values added in v5.2.0

Optional. Corresponds to the label values of reservation resource.

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfig struct {
	// Optional. A target for the deployment.
	NamespacedGkeDeploymentTarget *WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget `pulumi:"namespacedGkeDeploymentTarget"`
}

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs struct {
	// Optional. A target for the deployment.
	NamespacedGkeDeploymentTarget WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrInput `pulumi:"namespacedGkeDeploymentTarget"`
}

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput() WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs and WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTarget struct {
	// Optional. A namespace within the GKE cluster to deploy into.
	ClusterNamespace *string `pulumi:"clusterNamespace"`
	// Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	TargetGkeCluster *string `pulumi:"targetGkeCluster"`
}

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs struct {
	// Optional. A namespace within the GKE cluster to deploy into.
	ClusterNamespace pulumi.StringPtrInput `pulumi:"clusterNamespace"`
	// Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	TargetGkeCluster pulumi.StringPtrInput `pulumi:"targetGkeCluster"`
}
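The sketch below wires a deployment target into a GKE cluster config; the project, location, cluster name, and namespace are placeholder values, not defaults.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// gkeClusterConfig targets a hypothetical GKE cluster and namespace.
func gkeClusterConfig() dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrInput {
	return dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs{
		NamespacedGkeDeploymentTarget: dataproc.WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs{
			TargetGkeCluster: pulumi.String("projects/my-project/locations/us-central1/clusters/my-gke-cluster"),
			ClusterNamespace: pulumi.String("dataproc"),
		},
	}
}

func main() { _ = gkeClusterConfig() }
```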

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput() WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput
}

WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs and WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetInput` via:

WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput) ClusterNamespace added in v5.2.0

Optional. A namespace within the GKE cluster to deploy into.

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput) TargetGkeCluster added in v5.2.0

Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput() WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs, WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtr and WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput) ClusterNamespace added in v5.2.0

Optional. A namespace within the GKE cluster to deploy into.

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput) TargetGkeCluster added in v5.2.0

Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput) NamespacedGkeDeploymentTarget added in v5.2.0

Optional. A target for the deployment.

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs, WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtr and WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput) NamespacedGkeDeploymentTarget added in v5.2.0

Optional. A target for the deployment.

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigInitializationAction added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInitializationAction struct {
	// Required. Cloud Storage URI of executable file.
	ExecutableFile *string `pulumi:"executableFile"`
	// Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable has not completed by the end of the timeout period.
	ExecutionTimeout *string `pulumi:"executionTimeout"`
}

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs struct {
	// Required. Cloud Storage URI of executable file.
	ExecutableFile pulumi.StringPtrInput `pulumi:"executableFile"`
	// Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable has not completed by the end of the timeout period.
	ExecutionTimeout pulumi.StringPtrInput `pulumi:"executionTimeout"`
}
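A minimal sketch of a single initialization action follows, assuming a hypothetical Cloud Storage path for the executable and the duration string form described above for the timeout.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// initActions lists the startup scripts to run on cluster nodes.
// The Cloud Storage path is a placeholder; "600s" is a 10-minute timeout
// in the duration string form described above.
func initActions() dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayInput {
	return dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray{
		dataproc.WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs{
			ExecutableFile:   pulumi.String("gs://my-bucket/scripts/install-deps.sh"),
			ExecutionTimeout: pulumi.String("600s"),
		},
	}
}

func main() { _ = initActions() }
```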

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray []WorkflowTemplatePlacementManagedClusterConfigInitializationActionInput

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput() WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput
	ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput
}

WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray and WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayInput` via:

WorkflowTemplatePlacementManagedClusterConfigInitializationActionArray{ WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs{...} }

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput) Index added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput() WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput
	ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput
}

WorkflowTemplatePlacementManagedClusterConfigInitializationActionInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs and WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigInitializationActionInput` via:

WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput) ExecutableFile added in v5.2.0

Required. Cloud Storage URI of executable file.

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput) ExecutionTimeout added in v5.2.0

Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable has not completed by the end of the timeout period.

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput) ToWorkflowTemplatePlacementManagedClusterConfigInitializationActionOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput

type WorkflowTemplatePlacementManagedClusterConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigOutput() WorkflowTemplatePlacementManagedClusterConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigArgs and WorkflowTemplatePlacementManagedClusterConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig struct {
	// Optional. The time when cluster will be auto-deleted (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).
	AutoDeleteTime *string `pulumi:"autoDeleteTime"`
	// Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).
	AutoDeleteTtl *string `pulumi:"autoDeleteTtl"`
	// Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).
	IdleDeleteTtl *string `pulumi:"idleDeleteTtl"`
	// -
	// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).
	IdleStartTime *string `pulumi:"idleStartTime"`
}

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs struct {
	// Optional. The time when cluster will be auto-deleted (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).
	AutoDeleteTime pulumi.StringPtrInput `pulumi:"autoDeleteTime"`
	// Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).
	AutoDeleteTtl pulumi.StringPtrInput `pulumi:"autoDeleteTtl"`
	// Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).
	IdleDeleteTtl pulumi.StringPtrInput `pulumi:"idleDeleteTtl"`
	// -
	// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).
	IdleStartTime pulumi.StringPtrInput `pulumi:"idleStartTime"`
}
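The sketch below applies both TTL fields using the duration string form described above; the values are placeholders chosen within the documented minimum and maximum bounds, and the output-only `idleStartTime` is left unset.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// lifecycleConfig deletes the managed cluster after two hours of idleness or
// after one day of total lifetime, whichever comes first. IdleStartTime is
// output only and therefore left unset.
func lifecycleConfig() dataproc.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrInput {
	return dataproc.WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs{
		IdleDeleteTtl: pulumi.String("7200s"),
		AutoDeleteTtl: pulumi.String("86400s"),
	}
}

func main() { _ = lifecycleConfig() }
```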

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput() WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs and WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) AutoDeleteTime added in v5.2.0

Optional. The time when cluster will be auto-deleted (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) AutoDeleteTtl added in v5.2.0

Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) IdleDeleteTtl added in v5.2.0

Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) IdleStartTime added in v5.2.0

- Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs, WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtr and WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) AutoDeleteTime added in v5.2.0

Optional. The time when cluster will be auto-deleted (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) AutoDeleteTtl added in v5.2.0

Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) IdleDeleteTtl added in v5.2.0

Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) IdleStartTime added in v5.2.0

- Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of https://developers.google.com/protocol-buffers/docs/proto3#json).

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigMasterConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfig struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig *WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.
	Image *string `pulumi:"image"`
	// -
	// Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.
	InstanceNames []string `pulumi:"instanceNames"`
	// -
	// Output only. Specifies that this instance group contains preemptible instances.
	IsPreemptible *bool `pulumi:"isPreemptible"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. If you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example, `n1-standard-2`.
	MachineType *string `pulumi:"machineType"`
	// -
	// Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
	ManagedGroupConfigs []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig `pulumi:"managedGroupConfigs"`
	// Optional. Specifies the minimum CPU platform for the Instance Group. See https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu.
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.
	NumInstances *int `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
	Preemptibility *string `pulumi:"preemptibility"`
}

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAccelerator struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount *int `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.
	AcceleratorType *string `pulumi:"acceleratorType"`
}

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount pulumi.IntPtrInput `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringPtrInput `pulumi:"acceleratorType"`
}
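Pulling the accelerator settings together with the surrounding master config, the sketch below attaches one GPU to a single master instance. It is a sketch only: the machine type and accelerator type short names are placeholders, and it assumes the `MasterConfigArgs` fields mirror the struct documented above.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// masterConfig sketches a single-master instance group with one GPU attached.
// The machine type and accelerator type short names are placeholders.
func masterConfig() dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs {
	return dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
		NumInstances: pulumi.Int(1),
		MachineType:  pulumi.String("n1-standard-4"),
		Accelerators: dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray{
			dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs{
				AcceleratorCount: pulumi.Int(1),
				AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
			},
		},
	}
}

func main() { _ = masterConfig() }
```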

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray []WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorInput

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput() WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput
}

WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray and WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayInput` via:

WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArray{ WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs{...} }

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput) Index added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput() WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput
}

WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs and WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorInput` via:

WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput) AcceleratorCount added in v5.2.0

The number of the accelerator cards of this type exposed to this instance.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput) AcceleratorType added in v5.2.0

Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators WorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image example: `https://www.googleapis.com/compute/beta/projects/`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.
	Image pulumi.StringPtrInput `pulumi:"image"`
	// -
	// Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// -
	// Output only. Specifies that this instance group contains preemptible instances.
	IsPreemptible pulumi.BoolPtrInput `pulumi:"isPreemptible"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Example: `https://www.googleapis.com/compute/v1/projects/`. If you are using the Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example, `n1-standard-2`.
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// -
	// Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
	ManagedGroupConfigs WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayInput `pulumi:"managedGroupConfigs"`
	// Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
	Preemptibility pulumi.StringPtrInput `pulumi:"preemptibility"`
}
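
A minimal, self-contained sketch of populating these Args (machine type, disk sizes, and instance count are illustrative assumptions, not values taken from this documentation):

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Hypothetical master config; all values are assumptions for illustration.
		masterConfig := &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
			NumInstances: pulumi.Int(1),
			MachineType:  pulumi.String("n1-standard-2"), // short name form
			DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{
				BootDiskSizeGb: pulumi.Int(100),
				BootDiskType:   pulumi.String("pd-ssd"),
			},
		}
		_ = masterConfig // would be wired into a WorkflowTemplate placement's managed cluster config
		return nil
	})
}
```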

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfig struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType *string `pulumi:"bootDiskType"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput() WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs and WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) BootDiskSizeGb added in v5.2.0

Optional. Size in GB of the boot disk (default is 500GB).

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) BootDiskType added in v5.2.0

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) NumLocalSsds added in v5.2.0

Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs, WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtr and WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{...}

or:

        nil
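
In practice that means the disk config can be given as an `Args` literal or simply left unset; a hedged fragment showing both forms (values are assumptions, and the snippet presumes the same imports and `pulumi.Run` context as the example above):

```go
// With an explicit disk config (assumed values):
withDisk := &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
	DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigArgs{
		BootDiskSizeGb: pulumi.Int(500),
	},
}

// Leaving DiskConfig unset keeps the documented defaults (pd-standard, 500 GB).
withoutDisk := &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
	NumInstances: pulumi.Int(1),
}
_, _ = withDisk, withoutDisk
```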

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput) BootDiskSizeGb added in v5.2.0

Optional. Size in GB of the boot disk (default is 500GB).

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput) BootDiskType added in v5.2.0

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput) NumLocalSsds added in v5.2.0

Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput() WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigMasterConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs and WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMasterConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig struct {
	InstanceGroupManagerName *string `pulumi:"instanceGroupManagerName"`
	InstanceTemplateName     *string `pulumi:"instanceTemplateName"`
}

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs struct {
	InstanceGroupManagerName pulumi.StringPtrInput `pulumi:"instanceGroupManagerName"`
	InstanceTemplateName     pulumi.StringPtrInput `pulumi:"instanceTemplateName"`
}

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigInput

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput() WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput
}

WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray and WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayInput` via:

WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArray{ WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs{...} }

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput) Index added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput() WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs and WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput) InstanceGroupManagerName added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput) InstanceTemplateName added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) Accelerators added in v5.2.0

Optional. The Compute Engine accelerator configuration for these instances.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) DiskConfig added in v5.2.0

Optional. Disk option config settings.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) Image added in v5.2.0

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image example: `https://www.googleapis.com/compute/beta/projects/`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) InstanceNames added in v5.2.0

- Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) IsPreemptible added in v5.2.0

- Output only. Specifies that this instance group contains preemptible instances.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) MachineType added in v5.2.0

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Example: `https://www.googleapis.com/compute/v1/projects/`. If you are using the Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example, `n1-standard-2`.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) ManagedGroupConfigs added in v5.2.0

- Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) MinCpuPlatform added in v5.2.0

Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) NumInstances added in v5.2.0

Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) Preemptibility added in v5.2.0

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs, WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtr and WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) Accelerators added in v5.2.0

Optional. The Compute Engine accelerator configuration for these instances.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) DiskConfig added in v5.2.0

Optional. Disk option config settings.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) Image added in v5.2.0

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image example: `https://www.googleapis.com/compute/beta/projects/`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) InstanceNames added in v5.2.0

- Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) IsPreemptible added in v5.2.0

- Output only. Specifies that this instance group contains preemptible instances.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) MachineType added in v5.2.0

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Example: `https://www.googleapis.com/compute/v1/projects/`. If you are using the Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example, `n1-standard-2`.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) ManagedGroupConfigs added in v5.2.0

- Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) MinCpuPlatform added in v5.2.0

Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) NumInstances added in v5.2.0

Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) Preemptibility added in v5.2.0

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfig struct {
	// Required. Resource name of an existing Dataproc Metastore service. Example: `projects/`
	DataprocMetastoreService string `pulumi:"dataprocMetastoreService"`
}

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs struct {
	// Required. Resource name of an existing Dataproc Metastore service. Example: `projects/`
	DataprocMetastoreService pulumi.StringInput `pulumi:"dataprocMetastoreService"`
}
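
A hedged fragment showing how the required field is typically set (the project, location, and service name below are hypothetical placeholders, and the snippet assumes the usual imports inside a `pulumi.Run` callback):

```go
// Hypothetical Dataproc Metastore reference; substitute a real service
// resource name of the form projects/.../locations/.../services/...
metastore := &dataproc.WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs{
	DataprocMetastoreService: pulumi.String("projects/my-project/locations/us-central1/services/my-metastore"),
}
_ = metastore
```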

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput() WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs and WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput) DataprocMetastoreService added in v5.2.0

Required. Resource name of an existing Dataproc Metastore service. Example: `projects/`

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs, WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtr and WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput) DataprocMetastoreService added in v5.2.0

Required. Resource name of an existing Dataproc Metastore service. Example: `projects/`

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigOutput) AutoscalingConfig added in v5.2.0

Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigOutput) EncryptionConfig added in v5.2.0

Optional. Encryption settings for the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) EndpointConfig added in v5.2.0

Optional. Port/endpoint configuration for this cluster

func (WorkflowTemplatePlacementManagedClusterConfigOutput) GceClusterConfig added in v5.2.0

Optional. The shared Compute Engine config settings for all instances in a cluster.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) GkeClusterConfig added in v5.2.0

Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gceClusterConfig`, `masterConfig`, `workerConfig`, `secondaryWorkerConfig`, and `autoscalingConfig`.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) InitializationActions added in v5.2.0

Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi

func (WorkflowTemplatePlacementManagedClusterConfigOutput) LifecycleConfig added in v5.2.0

Optional. Lifecycle setting for the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) MasterConfig added in v5.2.0

Optional. The Compute Engine config settings for the cluster's master instance.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) MetastoreConfig added in v5.2.0

Optional. Metastore configuration.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) SecondaryWorkerConfig added in v5.2.0

Optional. The Compute Engine config settings for additional worker instances in a cluster.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) SecurityConfig added in v5.2.0

Optional. Security settings for the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) SoftwareConfig added in v5.2.0

Optional. The config settings for software inside the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) StagingBucket added in v5.2.0

Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).

func (WorkflowTemplatePlacementManagedClusterConfigOutput) TempBucket added in v5.2.0

Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.

func (WorkflowTemplatePlacementManagedClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigOutput added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigOutput() WorkflowTemplatePlacementManagedClusterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutput added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigPtrOutput

func (WorkflowTemplatePlacementManagedClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigPtrOutput

func (WorkflowTemplatePlacementManagedClusterConfigOutput) WorkerConfig added in v5.2.0

Optional. The Compute Engine config settings for worker instances in a cluster.

type WorkflowTemplatePlacementManagedClusterConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigArgs, WorkflowTemplatePlacementManagedClusterConfigPtr and WorkflowTemplatePlacementManagedClusterConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigArgs{...}

or:

        nil
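
As a sketch only, assuming `WorkflowTemplatePlacementManagedClusterConfigArgs` and `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs` mirror the fields documented on the Output type above (all values are illustrative, and the fragment presumes the usual imports inside a `pulumi.Run` callback):

```go
// Sketch only: assumes the Args struct mirrors the Output fields documented
// above; bucket name and instance counts are illustrative.
clusterConfig := &dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{
	StagingBucket: pulumi.String("my-staging-bucket"), // hypothetical bucket
	MasterConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs{
		NumInstances: pulumi.Int(1),
	},
	WorkerConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{
		NumInstances: pulumi.Int(2),
	},
}
_ = clusterConfig
```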

type WorkflowTemplatePlacementManagedClusterConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) AutoscalingConfig added in v5.2.0

Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) EncryptionConfig added in v5.2.0

Optional. Encryption settings for the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) EndpointConfig added in v5.2.0

Optional. Port/endpoint configuration for this cluster

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) GceClusterConfig added in v5.2.0

Optional. The shared Compute Engine config settings for all instances in a cluster.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) GkeClusterConfig added in v5.2.0

Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gceClusterConfig`, `masterConfig`, `workerConfig`, `secondaryWorkerConfig`, and `autoscalingConfig`.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) InitializationActions added in v5.2.0

Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) LifecycleConfig added in v5.2.0

Optional. Lifecycle setting for the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) MasterConfig added in v5.2.0

Optional. The Compute Engine config settings for the cluster's master instance.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) MetastoreConfig added in v5.2.0

Optional. Metastore configuration.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) SecondaryWorkerConfig added in v5.2.0

Optional. The Compute Engine config settings for additional worker instances in a cluster.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) SecurityConfig added in v5.2.0

Optional. Security settings for the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) SoftwareConfig added in v5.2.0

Optional. The config settings for software inside the cluster.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) StagingBucket added in v5.2.0

Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) TempBucket added in v5.2.0

Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigPtrOutput

func (WorkflowTemplatePlacementManagedClusterConfigPtrOutput) WorkerConfig added in v5.2.0

Optional. The Compute Engine config settings for worker instances in a cluster.
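
On the read side, the Ptr getters above can be chained from a `WorkflowTemplate` resource; a hedged sketch that assumes an existing resource `wt` and the generated `ManagedCluster`/`Config` getters on its `Placement` output:

```go
// Assumes wt is a *dataproc.WorkflowTemplate created earlier in the same
// pulumi.Run callback; each getter in the chain is nil-safe (Ptr outputs).
stagingBucket := wt.Placement.ManagedCluster().Config().StagingBucket()
ctx.Export("stagingBucket", stagingBucket)
```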

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfig struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig *WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image example: `https://www.googleapis.com/compute/beta/projects/`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.
	Image *string `pulumi:"image"`
	// -
	// Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.
	InstanceNames []string `pulumi:"instanceNames"`
	// -
	// Output only. Specifies that this instance group contains preemptible instances.
	IsPreemptible *bool `pulumi:"isPreemptible"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Example: `https://www.googleapis.com/compute/v1/projects/`. If you are using the Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example, `n1-standard-2`.
	MachineType *string `pulumi:"machineType"`
	// -
	// Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
	ManagedGroupConfigs []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig `pulumi:"managedGroupConfigs"`
	// Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.
	NumInstances *int `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
	Preemptibility *string `pulumi:"preemptibility"`
}

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAccelerator struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount *int `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.
	AcceleratorType *string `pulumi:"acceleratorType"`
}

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount pulumi.IntPtrInput `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringPtrInput `pulumi:"acceleratorType"`
}

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorInput

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput() WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray and WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayInput` via:

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArray{ WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs{...} }

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput) Index added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput() WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs and WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorInput` via:

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput) AcceleratorCount added in v5.2.0

The number of the accelerator cards of this type exposed to this instance.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput) AcceleratorType added in v5.2.0

Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: `https://www.googleapis.com/compute/beta/projects/`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.
	Image pulumi.StringPtrInput `pulumi:"image"`
	// -
	// Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// -
	// Output only. Specifies that this instance group contains preemptible instances.
	IsPreemptible pulumi.BoolPtrInput `pulumi:"isPreemptible"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: `https://www.googleapis.com/compute/v1/projects/`. If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// -
	// Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
	ManagedGroupConfigs WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayInput `pulumi:"managedGroupConfigs"`
	// Optional. Specifies the minimum CPU platform for the Instance Group. See https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu.
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
	Preemptibility pulumi.StringPtrInput `pulumi:"preemptibility"`
}
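
For illustration, a construction-only sketch that sets a few of the optional fields above; the pool size, machine type, and CPU platform are placeholder choices, and the output-only fields (`InstanceNames`, `IsPreemptible`, `ManagedGroupConfigs`) are left unset:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	secondaryWorkers := &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs{
		NumInstances:   pulumi.Int(2),                   // placeholder pool size
		MachineType:    pulumi.String("n1-standard-2"),  // short name, as required with Auto Zone Placement
		MinCpuPlatform: pulumi.String("Intel Skylake"),  // illustrative minimum CPU platform
		// PREEMPTIBLE is already the default for secondary instances; shown here for clarity.
		Preemptibility: pulumi.String("PREEMPTIBLE"),
	}
	_ = secondaryWorkers // would be wired into the managed cluster config of a workflow template
}
```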

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfig struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType *string `pulumi:"bootDiskType"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}
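
For illustration, a construction-only sketch that states the documented defaults explicitly; all values are placeholders:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	diskConfig := &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs{
		BootDiskSizeGb: pulumi.Int(500),              // documented default size, stated explicitly
		BootDiskType:   pulumi.String("pd-standard"), // documented default type
		// With no local SSDs attached, runtime logs and HDFS data stay on the boot disk.
		NumLocalSsds: pulumi.Int(0),
	}
	_ = diskConfig // would be supplied as the DiskConfig field of a secondary worker config
}
```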

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput() WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs and WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput) BootDiskSizeGb added in v5.2.0

Optional. Size in GB of the boot disk (default is 500GB).

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput) BootDiskType added in v5.2.0

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput) NumLocalSsds added in v5.2.0

Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtr and WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs{...}

or:

        nil
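
In practice this means a field typed as this PtrInput accepts either a concrete Args literal or nil; a minimal sketch with a placeholder disk size:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	// An Args value satisfies the PtrInput interface directly.
	var disk dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrInput = dataproc.WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigArgs{
		BootDiskSizeGb: pulumi.Int(500), // placeholder value
	}
	_ = disk

	// nil is also accepted and leaves the nested disk configuration unset.
	disk = nil
	_ = disk
}
```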

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput) BootDiskSizeGb added in v5.2.0

Optional. Size in GB of the boot disk (default is 500GB).

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput) BootDiskType added in v5.2.0

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput) NumLocalSsds added in v5.2.0

Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput() WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs and WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfig struct {
	InstanceGroupManagerName *string `pulumi:"instanceGroupManagerName"`
	InstanceTemplateName     *string `pulumi:"instanceTemplateName"`
}

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs struct {
	InstanceGroupManagerName pulumi.StringPtrInput `pulumi:"instanceGroupManagerName"`
	InstanceTemplateName     pulumi.StringPtrInput `pulumi:"instanceTemplateName"`
}

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray []WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigInput

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput() WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray and WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayInput` via:

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArray{ WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs{...} }

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput) Index added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput() WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs and WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput) InstanceGroupManagerName added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput) InstanceTemplateName added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) Accelerators added in v5.2.0

Optional. The Compute Engine accelerator configuration for these instances.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) DiskConfig added in v5.2.0

Optional. Disk option config settings.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) Image added in v5.2.0

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: `https://www.googleapis.com/compute/beta/projects/`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) InstanceNames added in v5.2.0

- Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) IsPreemptible added in v5.2.0

- Output only. Specifies that this instance group contains preemptible instances.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) MachineType added in v5.2.0

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: `https://www.googleapis.com/compute/v1/projects/`. If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) ManagedGroupConfigs added in v5.2.0

- Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) MinCpuPlatform added in v5.2.0

Optional. Specifies the minimum CPU platform for the Instance Group. See https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) NumInstances added in v5.2.0

Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) Preemptibility added in v5.2.0

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs, WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtr and WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) Accelerators added in v5.2.0

Optional. The Compute Engine accelerator configuration for these instances.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) DiskConfig added in v5.2.0

Optional. Disk option config settings.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) Image added in v5.2.0

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: `https://www.googleapis.com/compute/beta/projects/`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) InstanceNames added in v5.2.0

- Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) IsPreemptible added in v5.2.0

- Output only. Specifies that this instance group contains preemptible instances.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) MachineType added in v5.2.0

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name is valid. Examples: `https://www.googleapis.com/compute/v1/projects/`. If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) ManagedGroupConfigs added in v5.2.0

- Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) MinCpuPlatform added in v5.2.0

Optional. Specifies the minimum CPU platform for the Instance Group. See https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) NumInstances added in v5.2.0

Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) Preemptibility added in v5.2.0

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfig struct {
	// Kerberos related configuration.
	KerberosConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig `pulumi:"kerberosConfig"`
}

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs struct {
	// Kerberos related configuration.
	KerberosConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrInput `pulumi:"kerberosConfig"`
}

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput() WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecurityConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs and WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecurityConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfig struct {
	// Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer *string `pulumi:"crossRealmTrustAdminServer"`
	// Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc *string `pulumi:"crossRealmTrustKdc"`
	// Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
	CrossRealmTrustRealm *string `pulumi:"crossRealmTrustRealm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPassword *string `pulumi:"crossRealmTrustSharedPassword"`
	// Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
	EnableKerberos *bool `pulumi:"enableKerberos"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
	KdcDbKey *string `pulumi:"kdcDbKey"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
	KeyPassword *string `pulumi:"keyPassword"`
	// Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	Keystore *string `pulumi:"keystore"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
	KeystorePassword *string `pulumi:"keystorePassword"`
	// Optional. The uri of the KMS key used to encrypt various sensitive files.
	KmsKey *string `pulumi:"kmsKey"`
	// Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
	Realm *string `pulumi:"realm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.
	RootPrincipalPassword *string `pulumi:"rootPrincipalPassword"`
	// Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 will be used.
	TgtLifetimeHours *int `pulumi:"tgtLifetimeHours"`
	// Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	Truststore *string `pulumi:"truststore"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
	TruststorePassword *string `pulumi:"truststorePassword"`
}

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs struct {
	// Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustAdminServer pulumi.StringPtrInput `pulumi:"crossRealmTrustAdminServer"`
	// Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.
	CrossRealmTrustKdc pulumi.StringPtrInput `pulumi:"crossRealmTrustKdc"`
	// Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.
	CrossRealmTrustRealm pulumi.StringPtrInput `pulumi:"crossRealmTrustRealm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPassword pulumi.StringPtrInput `pulumi:"crossRealmTrustSharedPassword"`
	// Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.
	EnableKerberos pulumi.BoolPtrInput `pulumi:"enableKerberos"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.
	KdcDbKey pulumi.StringPtrInput `pulumi:"kdcDbKey"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.
	KeyPassword pulumi.StringPtrInput `pulumi:"keyPassword"`
	// Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	Keystore pulumi.StringPtrInput `pulumi:"keystore"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.
	KeystorePassword pulumi.StringPtrInput `pulumi:"keystorePassword"`
	// Optional. The uri of the KMS key used to encrypt various sensitive files.
	KmsKey pulumi.StringPtrInput `pulumi:"kmsKey"`
	// Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.
	Realm pulumi.StringPtrInput `pulumi:"realm"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.
	RootPrincipalPassword pulumi.StringPtrInput `pulumi:"rootPrincipalPassword"`
	// Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 will be used.
	TgtLifetimeHours pulumi.IntPtrInput `pulumi:"tgtLifetimeHours"`
	// Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.
	Truststore pulumi.StringPtrInput `pulumi:"truststore"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.
	TruststorePassword pulumi.StringPtrInput `pulumi:"truststorePassword"`
}
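
For illustration, a construction-only sketch that enables Kerberos through the enclosing security config; the KMS key and Cloud Storage URIs are hypothetical placeholders, not working values:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	securityConfig := &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs{
		KerberosConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs{
			EnableKerberos: pulumi.Bool(true),
			// Hypothetical KMS key and Cloud Storage URI; substitute real resources.
			KmsKey:                pulumi.String("projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"),
			RootPrincipalPassword: pulumi.String("gs://my-secrets/root-principal-password.encrypted"),
			// Ticket granting ticket lifetime in hours; 10 is the documented default.
			TgtLifetimeHours: pulumi.Int(10),
		},
	}
	_ = securityConfig // would be wired into the managed cluster config of a workflow template
}
```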

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput() WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs and WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustAdminServer added in v5.2.0

Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustKdc added in v5.2.0

Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustRealm added in v5.2.0

Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) CrossRealmTrustSharedPassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) EnableKerberos added in v5.2.0

Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) KdcDbKey added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) KeyPassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) Keystore added in v5.2.0

Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) KeystorePassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) KmsKey added in v5.2.0

Optional. The uri of the KMS key used to encrypt various sensitive files.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) Realm added in v5.2.0

Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) RootPrincipalPassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) TgtLifetimeHours added in v5.2.0

Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 will be used.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) Truststore added in v5.2.0

Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigOutput) TruststorePassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs, WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtr and WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustAdminServer added in v5.2.0

Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustKdc added in v5.2.0

Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustRealm added in v5.2.0

Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) CrossRealmTrustSharedPassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) EnableKerberos added in v5.2.0

Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) KdcDbKey added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) KeyPassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) Keystore added in v5.2.0

Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) KeystorePassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) KmsKey added in v5.2.0

Optional. The uri of the KMS key used to encrypt various sensitive files.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) Realm added in v5.2.0

Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) RootPrincipalPassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) TgtLifetimeHours added in v5.2.0

Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or if the user specifies 0, the default value of 10 will be used.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) Truststore added in v5.2.0

Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigPtrOutput) TruststorePassword added in v5.2.0

Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput) KerberosConfig added in v5.2.0

Kerberos related configuration.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigSecurityConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs, WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtr and WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput) KerberosConfig added in v5.2.0

Kerberos related configuration.

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig struct {
	// Optional. The version of software inside the cluster. It must be one of the supported Dataproc versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.
	ImageVersion       *string  `pulumi:"imageVersion"`
	OptionalComponents []string `pulumi:"optionalComponents"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties map[string]string `pulumi:"properties"`
}

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs struct {
	// Optional. The version of software inside the cluster. It must be one of the supported Dataproc versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.
	ImageVersion       pulumi.StringPtrInput   `pulumi:"imageVersion"`
	OptionalComponents pulumi.StringArrayInput `pulumi:"optionalComponents"`
	// Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.
	Properties pulumi.StringMapInput `pulumi:"properties"`
}
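
For illustration, a hedged sketch of populating these inputs follows. It assumes the `dataproc` and `pulumi` imports from the package example; the image version, component name, and property value are illustrative, not defaults.

```go
// Sketch only: an explicit image version, one optional component, and a daemon
// property using the prefix:property key format described above.
softwareConfig := &dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs{
	ImageVersion:       pulumi.String("2.0-debian10"),
	OptionalComponents: pulumi.StringArray{pulumi.String("ZEPPELIN")},
	Properties: pulumi.StringMap{
		"spark:spark.executor.memory": pulumi.String("4g"),
	},
}
_ = softwareConfig // set as the managed cluster config's software config
```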

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput() WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs and WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) ImageVersion added in v5.2.0

Optional. The version of software inside the cluster. It must be one of the supported Dataproc versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) OptionalComponents added in v5.22.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs, WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtr and WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) ImageVersion added in v5.2.0

Optional. The version of software inside the cluster. It must be one of the supported Dataproc versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) OptionalComponents added in v5.22.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) Properties added in v5.2.0

Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see https://cloud.google.com/dataproc/docs/concepts/cluster-properties.

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfig struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig *WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/...`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.
	Image *string `pulumi:"image"`
	// -
	// Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.
	InstanceNames []string `pulumi:"instanceNames"`
	// -
	// Output only. Specifies that this instance group contains preemptible instances.
	IsPreemptible *bool `pulumi:"isPreemptible"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid, for example `https://www.googleapis.com/compute/v1/projects/...` or `n1-standard-2`. Auto Zone Exception: if you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example `n1-standard-2`.
	MachineType *string `pulumi:"machineType"`
	// -
	// Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
	ManagedGroupConfigs []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig `pulumi:"managedGroupConfigs"`
	// Optional. Specifies the minimum CPU platform for the Instance Group. See https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu.
	MinCpuPlatform *string `pulumi:"minCpuPlatform"`
	// Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.
	NumInstances *int `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
	Preemptibility *string `pulumi:"preemptibility"`
}

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAccelerator struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount *int `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: if you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example `nvidia-tesla-k80`.
	AcceleratorType *string `pulumi:"acceleratorType"`
}

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs struct {
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount pulumi.IntPtrInput `pulumi:"acceleratorCount"`
	// Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: if you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example `nvidia-tesla-k80`.
	AcceleratorType pulumi.StringPtrInput `pulumi:"acceleratorType"`
}

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorInput

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput() WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput
	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput
}

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray and WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayInput` via:

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray{ WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs{...} }
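
For example, a one-element array can be built exactly as described above; the accelerator type and count are illustrative, and the `dataproc` and `pulumi` imports from the package example are assumed.

```go
// Sketch only: a single GPU per instance, using the short accelerator type name.
accelerators := dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArray{
	&dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs{
		AcceleratorCount: pulumi.Int(1),
		AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
	},
}
_ = accelerators // set as the worker config's Accelerators input
```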

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput) Index added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput() WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput
	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput
}

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs and WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorInput` via:

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput) AcceleratorCount added in v5.2.0

The number of the accelerator cards of this type exposed to this instance.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput) AcceleratorType added in v5.2.0

Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. Auto Zone Exception: if you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the accelerator type resource, for example `nvidia-tesla-k80`.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs struct {
	// Optional. The Compute Engine accelerator configuration for these instances.
	Accelerators WorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorArrayInput `pulumi:"accelerators"`
	// Optional. Disk option config settings.
	DiskConfig WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrInput `pulumi:"diskConfig"`
	// Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/...`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.
	Image pulumi.StringPtrInput `pulumi:"image"`
	// -
	// Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.
	InstanceNames pulumi.StringArrayInput `pulumi:"instanceNames"`
	// -
	// Output only. Specifies that this instance group contains preemptible instances.
	IsPreemptible pulumi.BoolPtrInput `pulumi:"isPreemptible"`
	// Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid, for example `https://www.googleapis.com/compute/v1/projects/...` or `n1-standard-2`. Auto Zone Exception: if you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example `n1-standard-2`.
	MachineType pulumi.StringPtrInput `pulumi:"machineType"`
	// -
	// Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
	ManagedGroupConfigs WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayInput `pulumi:"managedGroupConfigs"`
	// Optional. Specifies the minimum CPU platform for the Instance Group. See https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu.
	MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"`
	// Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.
	NumInstances pulumi.IntPtrInput `pulumi:"numInstances"`
	// Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE
	Preemptibility pulumi.StringPtrInput `pulumi:"preemptibility"`
}
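
A hedged sketch of a worker-config input built from these fields follows. It assumes the `dataproc` and `pulumi` imports from the package example and sets only input fields (the output-only fields are omitted); the machine type, instance count, and disk values are illustrative.

```go
// Sketch only: a primary worker group with two instances and an explicit boot disk.
workerConfig := &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{
	MachineType:  pulumi.String("n1-standard-2"),
	NumInstances: pulumi.Int(2),
	DiskConfig: &dataproc.WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs{
		BootDiskSizeGb: pulumi.Int(500),
		BootDiskType:   pulumi.String("pd-standard"),
	},
	Preemptibility: pulumi.String("NON_PREEMPTIBLE"),
}
_ = workerConfig // set as the managed cluster config's worker config
```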

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfig struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb *int `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType *string `pulumi:"bootDiskType"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
	NumLocalSsds *int `pulumi:"numLocalSsds"`
}

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs struct {
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb pulumi.IntPtrInput `pulumi:"bootDiskSizeGb"`
	// Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType pulumi.StringPtrInput `pulumi:"bootDiskType"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
	NumLocalSsds pulumi.IntPtrInput `pulumi:"numLocalSsds"`
}

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutputWithContext added in v5.2.0

func (i WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput() WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs and WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) BootDiskSizeGb added in v5.2.0

Optional. Size in GB of the boot disk (default is 500GB).

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) BootDiskType added in v5.2.0

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) NumLocalSsds added in v5.2.0

Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutputWithContext added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtr and WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput) BootDiskSizeGb added in v5.2.0

Optional. Size in GB of the boot disk (default is 500GB).

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput) BootDiskType added in v5.2.0

Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive).

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput) NumLocalSsds added in v5.2.0

Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigPtrOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput() WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs and WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfig struct {
	InstanceGroupManagerName *string `pulumi:"instanceGroupManagerName"`
	InstanceTemplateName     *string `pulumi:"instanceTemplateName"`
}

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs struct {
	InstanceGroupManagerName pulumi.StringPtrInput `pulumi:"instanceGroupManagerName"`
	InstanceTemplateName     pulumi.StringPtrInput `pulumi:"instanceTemplateName"`
}

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray []WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigInput

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput() WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput
	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput
}

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray and WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayInput` via:

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArray{ WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs{...} }

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput) Index added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArrayOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput() WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput
	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput
}

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs and WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigInput` via:

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigArgs{...}

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput) InstanceGroupManagerName added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput) InstanceTemplateName added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigOutputWithContext added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) Accelerators added in v5.2.0

Optional. The Compute Engine accelerator configuration for these instances.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) DiskConfig added in v5.2.0

Optional. Disk option config settings.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) Image added in v5.2.0

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/...`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) InstanceNames added in v5.2.0

- Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) IsPreemptible added in v5.2.0

- Output only. Specifies that this instance group contains preemptible instances.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) MachineType added in v5.2.0

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid, for example `https://www.googleapis.com/compute/v1/projects/...` or `n1-standard-2`. Auto Zone Exception: if you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example `n1-standard-2`.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) ManagedGroupConfigs added in v5.2.0

- Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) MinCpuPlatform added in v5.2.0

Optional. Specifies the minimum CPU platform for the Instance Group. See https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) NumInstances added in v5.2.0

Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) Preemptibility added in v5.2.0

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigWorkerConfigOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput() WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput
	ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput
}

WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs, WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtr and WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrInput` via:

        WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) Accelerators added in v5.2.0

Optional. The Compute Engine accelerator configuration for these instances.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) DiskConfig added in v5.2.0

Optional. Disk option config settings.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) Image added in v5.2.0

Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/...`. If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) InstanceNames added in v5.2.0

- Output only. The list of instance names. Dataproc derives the names from `clusterName`, `numInstances`, and the instance group.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) IsPreemptible added in v5.2.0

- Output only. Specifies that this instance group contains preemptible instances.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) MachineType added in v5.2.0

Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid, for example `https://www.googleapis.com/compute/v1/projects/...` or `n1-standard-2`. Auto Zone Exception: if you are using the Dataproc Auto Zone Placement feature (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement), you must use the short name of the machine type resource, for example `n1-standard-2`.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) ManagedGroupConfigs added in v5.2.0

- Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) MinCpuPlatform added in v5.2.0

Optional. Specifies the minimum CPU platform for the Instance Group. See https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) NumInstances added in v5.2.0

Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1.

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) Preemptibility added in v5.2.0

Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput) ToWorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterConfigWorkerConfigPtrOutput

type WorkflowTemplatePlacementManagedClusterInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterOutput() WorkflowTemplatePlacementManagedClusterOutput
	ToWorkflowTemplatePlacementManagedClusterOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterOutput
}

WorkflowTemplatePlacementManagedClusterInput is an input type that accepts WorkflowTemplatePlacementManagedClusterArgs and WorkflowTemplatePlacementManagedClusterOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterInput` via:

WorkflowTemplatePlacementManagedClusterArgs{...}
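
As a sketch of that construction: the fragment below assumes the `dataproc` and `pulumi` imports from the package example, assumes the Args field names mirror the getters documented below (ClusterName, Config, Labels), and assumes a `WorkflowTemplatePlacementManagedClusterConfigArgs` type following the same naming pattern as the other config types on this page; the concrete values are illustrative.

```go
// Sketch only: a managed cluster whose name prefix and labels respect the
// constraints documented below; the empty config would normally carry the
// software, worker, and security configs shown earlier on this page.
managedCluster := &dataproc.WorkflowTemplatePlacementManagedClusterArgs{
	ClusterName: pulumi.String("wt-cluster"),
	Config:      &dataproc.WorkflowTemplatePlacementManagedClusterConfigArgs{},
	Labels: pulumi.StringMap{
		"env": pulumi.String("dev"),
	},
}
_ = managedCluster // set as the placement's managed cluster
```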

type WorkflowTemplatePlacementManagedClusterOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterOutput) ClusterName added in v5.2.0

Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.

func (WorkflowTemplatePlacementManagedClusterOutput) Config added in v5.2.0

Required. The cluster configuration.

func (WorkflowTemplatePlacementManagedClusterOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterOutput) Labels added in v5.2.0

Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long. No more than 32 labels can be associated with a given cluster.

func (WorkflowTemplatePlacementManagedClusterOutput) ToWorkflowTemplatePlacementManagedClusterOutput added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterOutput) ToWorkflowTemplatePlacementManagedClusterOutput() WorkflowTemplatePlacementManagedClusterOutput

func (WorkflowTemplatePlacementManagedClusterOutput) ToWorkflowTemplatePlacementManagedClusterOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterOutput) ToWorkflowTemplatePlacementManagedClusterOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterOutput

func (WorkflowTemplatePlacementManagedClusterOutput) ToWorkflowTemplatePlacementManagedClusterPtrOutput added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterOutput) ToWorkflowTemplatePlacementManagedClusterPtrOutput() WorkflowTemplatePlacementManagedClusterPtrOutput

func (WorkflowTemplatePlacementManagedClusterOutput) ToWorkflowTemplatePlacementManagedClusterPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterOutput) ToWorkflowTemplatePlacementManagedClusterPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterPtrOutput

type WorkflowTemplatePlacementManagedClusterPtrInput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementManagedClusterPtrOutput() WorkflowTemplatePlacementManagedClusterPtrOutput
	ToWorkflowTemplatePlacementManagedClusterPtrOutputWithContext(context.Context) WorkflowTemplatePlacementManagedClusterPtrOutput
}

WorkflowTemplatePlacementManagedClusterPtrInput is an input type that accepts WorkflowTemplatePlacementManagedClusterArgs, WorkflowTemplatePlacementManagedClusterPtr and WorkflowTemplatePlacementManagedClusterPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementManagedClusterPtrInput` via:

        WorkflowTemplatePlacementManagedClusterArgs{...}

or:

        nil

type WorkflowTemplatePlacementManagedClusterPtrOutput added in v5.2.0

type WorkflowTemplatePlacementManagedClusterPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementManagedClusterPtrOutput) ClusterName added in v5.2.0

Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.

func (WorkflowTemplatePlacementManagedClusterPtrOutput) Config added in v5.2.0

Required. The cluster configuration.

func (WorkflowTemplatePlacementManagedClusterPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementManagedClusterPtrOutput) Labels added in v5.2.0

Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long. No more than 32 labels can be associated with a given cluster.

func (WorkflowTemplatePlacementManagedClusterPtrOutput) ToWorkflowTemplatePlacementManagedClusterPtrOutput added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterPtrOutput) ToWorkflowTemplatePlacementManagedClusterPtrOutput() WorkflowTemplatePlacementManagedClusterPtrOutput

func (WorkflowTemplatePlacementManagedClusterPtrOutput) ToWorkflowTemplatePlacementManagedClusterPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementManagedClusterPtrOutput) ToWorkflowTemplatePlacementManagedClusterPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementManagedClusterPtrOutput

type WorkflowTemplatePlacementOutput added in v5.2.0

type WorkflowTemplatePlacementOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementOutput) ClusterSelector added in v5.2.0

Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.

func (WorkflowTemplatePlacementOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementOutput) ManagedCluster added in v5.2.0

A cluster that is managed by the workflow.

func (WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementOutput added in v5.2.0

func (o WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementOutput() WorkflowTemplatePlacementOutput

func (WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementOutputWithContext(ctx context.Context) WorkflowTemplatePlacementOutput

func (WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementPtrOutput added in v5.2.0

func (o WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementPtrOutput() WorkflowTemplatePlacementPtrOutput

func (WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementOutput) ToWorkflowTemplatePlacementPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementPtrOutput

type WorkflowTemplatePlacementPtrInput added in v5.2.0

type WorkflowTemplatePlacementPtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePlacementPtrOutput() WorkflowTemplatePlacementPtrOutput
	ToWorkflowTemplatePlacementPtrOutputWithContext(context.Context) WorkflowTemplatePlacementPtrOutput
}

WorkflowTemplatePlacementPtrInput is an input type that accepts WorkflowTemplatePlacementArgs, WorkflowTemplatePlacementPtr and WorkflowTemplatePlacementPtrOutput values. You can construct a concrete instance of `WorkflowTemplatePlacementPtrInput` via:

        WorkflowTemplatePlacementArgs{...}

or:

        nil
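
As a rough sketch, a placement that targets an existing cluster by label might look like the fragment below (again intended for a `pulumi.Run` callback). The `WorkflowTemplatePlacementClusterSelectorArgs` type name and its `ClusterLabels` field are assumed from the SDK's naming convention, and the label key/value are placeholders; passing `nil` is also accepted wherever a `WorkflowTemplatePlacementPtrInput` is expected.

```go
// Sketch only: the cluster-selector type and label values below are assumptions.
placement := &dataproc.WorkflowTemplatePlacementArgs{
	ClusterSelector: &dataproc.WorkflowTemplatePlacementClusterSelectorArgs{
		// Jobs are submitted to an existing cluster that carries this label.
		ClusterLabels: pulumi.StringMap{
			"goog-dataproc-cluster-name": pulumi.String("my-existing-cluster"),
		},
	},
}
_ = placement
```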

func WorkflowTemplatePlacementPtr added in v5.2.0

type WorkflowTemplatePlacementPtrOutput added in v5.2.0

type WorkflowTemplatePlacementPtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePlacementPtrOutput) ClusterSelector added in v5.2.0

Optional. A selector that chooses the target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.

func (WorkflowTemplatePlacementPtrOutput) Elem added in v5.2.0

func (WorkflowTemplatePlacementPtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePlacementPtrOutput) ManagedCluster added in v5.2.0

A cluster that is managed by the workflow.

func (WorkflowTemplatePlacementPtrOutput) ToWorkflowTemplatePlacementPtrOutput added in v5.2.0

func (o WorkflowTemplatePlacementPtrOutput) ToWorkflowTemplatePlacementPtrOutput() WorkflowTemplatePlacementPtrOutput

func (WorkflowTemplatePlacementPtrOutput) ToWorkflowTemplatePlacementPtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePlacementPtrOutput) ToWorkflowTemplatePlacementPtrOutputWithContext(ctx context.Context) WorkflowTemplatePlacementPtrOutput

type WorkflowTemplatePtrInput added in v5.2.0

type WorkflowTemplatePtrInput interface {
	pulumi.Input

	ToWorkflowTemplatePtrOutput() WorkflowTemplatePtrOutput
	ToWorkflowTemplatePtrOutputWithContext(ctx context.Context) WorkflowTemplatePtrOutput
}

type WorkflowTemplatePtrOutput added in v5.2.0

type WorkflowTemplatePtrOutput struct{ *pulumi.OutputState }

func (WorkflowTemplatePtrOutput) Elem added in v5.21.0

func (WorkflowTemplatePtrOutput) ElementType added in v5.2.0

func (WorkflowTemplatePtrOutput) ElementType() reflect.Type

func (WorkflowTemplatePtrOutput) ToWorkflowTemplatePtrOutput added in v5.2.0

func (o WorkflowTemplatePtrOutput) ToWorkflowTemplatePtrOutput() WorkflowTemplatePtrOutput

func (WorkflowTemplatePtrOutput) ToWorkflowTemplatePtrOutputWithContext added in v5.2.0

func (o WorkflowTemplatePtrOutput) ToWorkflowTemplatePtrOutputWithContext(ctx context.Context) WorkflowTemplatePtrOutput

type WorkflowTemplateState added in v5.2.0

type WorkflowTemplateState struct {
	// Output only. The time template was created.
	CreateTime pulumi.StringPtrInput
	// (Beta only) Optional. Timeout duration for the DAG of jobs. You can use "s", "m", "h", and "d" suffixes for second, minute, hour, and day duration values, respectively. The timeout duration must be from 10 minutes ("10m") to 24 hours ("24h" or "1d"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster (/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.
	DagTimeout pulumi.StringPtrInput
	// Required. The Directed Acyclic Graph of Jobs to submit.
	Jobs WorkflowTemplateJobArrayInput
	// Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: `\p{Ll}\p{Lo}{0,62}`. Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. No more than 32 labels can be associated with a given cluster.
	Labels pulumi.StringMapInput
	// The location for the resource
	Location pulumi.StringPtrInput
	// Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.
	Name pulumi.StringPtrInput
	// Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.
	Parameters WorkflowTemplateParameterArrayInput
	// Required. WorkflowTemplate scheduling information.
	Placement WorkflowTemplatePlacementPtrInput
	// The project for the resource
	Project pulumi.StringPtrInput
	// Output only. The time template was last updated.
	UpdateTime pulumi.StringPtrInput
	// Optional. Used to perform a consistent read-modify-write. This field should be left blank for a `CreateWorkflowTemplate` request. It is required for an `UpdateWorkflowTemplate` request, and must match the current server version. A typical update template flow would fetch the current template with a `GetWorkflowTemplate` request, which will return the current template with the `version` field filled in with the current server version. The user updates other fields in the template, then returns it as part of the `UpdateWorkflowTemplate` request.
	//
	// Deprecated: version is not useful as a configurable field, and will be removed in the future.
	Version pulumi.IntPtrInput
}
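
WorkflowTemplateState is the state type consumed by the package's lookup function for this resource. A minimal sketch of adopting an existing template is shown below; the project, location, and template ID are placeholders, and passing a nil state asks the provider to populate the fields listed above.

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v5/go/gcp/dataproc"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an existing workflow template by its fully qualified ID
		// (placeholder values shown) and adopt it into the Pulumi program.
		tmpl, err := dataproc.GetWorkflowTemplate(ctx, "existing",
			pulumi.ID("projects/my-project/locations/us-central1/workflowTemplates/my-template"),
			nil)
		if err != nil {
			return err
		}
		ctx.Export("templateName", tmpl.Name)
		return nil
	})
}
```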

func (WorkflowTemplateState) ElementType added in v5.2.0

func (WorkflowTemplateState) ElementType() reflect.Type
